def twitter_bot():
    """Poll tweets, predict a reply for each with a seq2seq model, and post it.

    Reads Twitter OAuth credentials from the environment
    (``consumer_key``, ``consumer_secret``, ``access_token``,
    ``access_token_secret``), opens a GPU-capped TensorFlow session, and
    iterates over the ``tweets()`` generator. For each tweet it predicts a
    reply via ``predict.EasyPredictor`` and posts it with ``post_reply``.

    Raises:
        tweepy.TweepError: re-raised for any Twitter API error other than
            code 187 (duplicate status), which is deliberately ignored.
    """
    # Only allocate part of the GPU memory when predicting, so the bot can
    # coexist with other GPU workloads (e.g. a training job).
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)

    # Credentials come from the environment; os.getenv returns None for any
    # missing key, in which case authentication will fail at request time.
    consumer_key = os.getenv("consumer_key")
    consumer_secret = os.getenv("consumer_secret")
    access_token = os.getenv("access_token")
    access_token_secret = os.getenv("access_token_secret")
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)

    with tf.Session(config=tf_config) as sess:
        predictor = predict.EasyPredictor(sess)
        for tweet in tweets():
            status_id, status, bot_flag = tweet
            print("Processing {0}...".format(status.text))
            screen_name = status.author.screen_name
            replies = predictor.predict(status.text)
            if not replies:
                # NOTE(review): an empty prediction skips
                # mark_tweet_processed, so this tweet will be retried on the
                # next pass — confirm that is the intended behavior.
                print("no reply")
                continue
            reply_body = replies[0]
            if reply_body is None:
                print("No reply predicted")
            else:
                try:
                    post_reply(api, bot_flag, reply_body, screen_name, status_id)
                except tweepy.TweepError as e:
                    # API code 187 = duplicate status: we already said this;
                    # safe to skip. Anything else is a real failure.
                    if e.api_code == 187:
                        pass
                    else:
                        raise
            # Mark processed whether or not the post succeeded as a duplicate,
            # so the same status is never re-handled.
            mark_tweet_processed(status_id)
# tweet_replyer.py file source code
# python
# Reads: 29
# Favorites: 0
# Likes: 0
# Comments: 0
# Comment list
# Table of contents