def tweetMeaning(self,term):
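    """Fetch tweets matching `term` and score each one as "pro", "neg" or
    "unknown" by counting stemmed words against the good/bad word lists in
    data/words.json. Returns a list of per-tweet result dicts."""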
    self.dbout = self.searcher(term)
    # words.json maps categories such as "good" and "bad" to lists of word stems.
    with open("data/words.json") as filedata:
        self.wordList = json.load(filedata)
    threading.Thread(target=self.dis.spinner, args=("Analysing Tweets ",)).start()
    self.tweetList = []
    for self.i in self.dbout:
        self.procounter = 0
        self.negcounter = 0
        self.neucounter = 0
        for self.word in nltk.word_tokenize(self.i["tweet"]):
            # print("Analysing word: " + self.word)
            try:
                if nltk.PorterStemmer().stem(self.word) in self.wordList["good"]:
                    # print("Found good word")
                    self.procounter += 1
                elif nltk.PorterStemmer().stem(self.word) in self.wordList["bad"]:
                    # print("Found bad word")
                    self.negcounter += 1
                # elif nltk.PorterStemmer().stem(self.word) in self.wordList["swear"]:
                #     print("Found swear word")
                #     self.negcounter += 1
                else:
                    self.neucounter += 1
            except IndexError:
                print("Ignoring tweet:", self.i["tweet"])
        self.view = "unknown"
        if self.procounter > self.negcounter:
            self.view = "pro"
        if self.negcounter > self.procounter:
            self.view = "neg"
        self.tweetDict = {
            "id": self.i["_id"],
            "tweet": self.i["tweet"],
            "procount": self.procounter,
            "negcount": self.negcounter,
            # "view": "pro" if self.procounter > self.negcounter else "neg"
            "view": self.view
        }
        self.tweetList.append(self.tweetDict)
    self.dis.stop()
    return self.tweetList
# This method gets the poll data from the JSON file it is
# stored in; it then adds them up to get a total.
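# A minimal sketch of what such a method might look like. The method name
# pollTotal, the path "data/polls.json" and the option -> count layout are
# illustrative assumptions, not taken from the original source.
def pollTotal(self):
    with open("data/polls.json") as filedata:
        pollData = json.load(filedata)
    # Sum the stored counts across poll options to get the overall total.
    return sum(pollData.values())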