def duplicate_checker(x):
    """Return True if *x* is already present in linkedin_data_collection.txt.

    Each line of linkedin_data_collection.txt is compared with its last
    whitespace-separated word dropped (presumably a trailing field such as a
    job title — TODO confirm against the writer of that file).  If *x*
    matches one of those trimmed lines, every occurrence of *x* is stripped
    out of linkedin_rec_people.txt in place and True is returned; otherwise
    the recommendations file is left untouched and False is returned.

    Args:
        x: candidate string (e.g. a person's name) to de-duplicate.

    Returns:
        bool: True when *x* was a duplicate (and was removed from the
        recommendations file), False otherwise.
    """
    # Local imports so this function is self-contained; the original
    # source used fileinput/sys without any visible import.
    import fileinput
    import sys

    with open('linkedin_data_collection.txt') as collected:
        raw_lines = collected.readlines()

    # Build the comparison set once, dropping the last word of each line.
    # A set gives O(1) membership tests — this addresses the original
    # TODOs about hash tables and the inefficient comparison loop.
    known = {' '.join(line.split()[:-1]) for line in raw_lines}

    # Original bug: it tested `x in w` (raw lines, last word still attached,
    # trailing newline included), so the match could essentially never fire.
    # The trimmed set is what the preceding loop was built for.
    if x not in known:
        return False

    # x is a duplicate: rewrite the recommendations file in place,
    # removing every occurrence of x (same replacement the original did).
    for line in fileinput.input('linkedin_rec_people.txt', inplace=True):
        sys.stdout.write(line.replace(x, ''))
    fileinput.close()
    return True
linkedin_collect_url.py 文件源码
python
阅读 23
收藏 0
点赞 0
评论 0
评论列表
文章目录