def __init__(self, room_url):
"""
:param room_url: URL of the live-stream room to monitor
"""
# Process.__init__(self)
# live room URL
self.room_url = room_url
# domain of the streaming site, e.g. live.bilibili.com
self.site_domain = urlparse(self.room_url).netloc
# room ID, taken from the URL path
self.room_id = urlparse(self.room_url).path.replace('/', '')
# load project configuration
self.config = utils.load_config()
# Logger
self.logger = utils.get_logger()
if self.site_domain == 'live.bilibili.com':
self.room = BiliBiliLive(self.room_id)
elif self.site_domain == 'www.panda.tv':
self.room = PandaTVLive(self.room_id)
elif self.site_domain == 'www.huomao.com':
self.room = HuoMaoLive(self.room_id)
elif self.site_domain == 'www.zhanqi.tv':
self.room = ZhanqiLive(self.room_id)
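Every snippet on this page relies on a project-local `utils.get_logger()` helper. A minimal sketch of what such a helper could look like, assuming it simply wraps the standard `logging` module (the default name, format string, and level here are assumptions, not taken from the source):

def get_logger(name='monitor', level=logging.INFO):
    """Return a configured console logger; name, level and format are assumed defaults."""
    import logging
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid adding duplicate handlers on repeated calls
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(level)
    return logger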
def start():
if len(sys.argv[1:]) == 0:
config = utils.load_config()
else:
config = utils.load_config(sys.argv[1])
logger = utils.get_logger()
logger.info('Monitor started')
room_count = len(config['ROOM_URLS'])
if room_count == 0:
logger.info('No room URLs configured, please check the config file')
exit(0)
pool = ThreadPool(room_count)
for room_url in config['ROOM_URLS']:
m = Monitor(room_url)
pool.apply_async(m.run)
pool.close()
try:
pool.join()
except KeyboardInterrupt:
logger.warning('Stopped by user')
exit(1)
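`start()` expects a config object with a `ROOM_URLS` list and a thread pool sized to the number of rooms. A minimal sketch of the imports and a JSON-backed `load_config()` it could pair with; the file name and JSON layout are assumptions, the real project may use YAML or another format:

import json
import sys
from multiprocessing.pool import ThreadPool  # provides the ThreadPool used above

def load_config(path='config.json'):
    """Load the monitor configuration; a JSON file with a ROOM_URLS list is assumed."""
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)

# Hypothetical config.json contents:
# {"ROOM_URLS": ["https://live.bilibili.com/1234", "https://www.panda.tv/5678"]}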
def evaluate_line():
config = load_config(FLAGS.config_file)
logger = get_logger(FLAGS.log_file)
# limit GPU memory
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with open(FLAGS.map_file, "rb") as f:
char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
with tf.Session(config=tf_config) as sess:
model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
while True:
try:
line = input("Enter a sentence: ")
result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
print(result)
except Exception as e:
logger.info(e)
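The NER snippet above calls `get_logger(FLAGS.log_file)` with a file path rather than a name. A sketch of a file-backed variant, assuming the helper attaches a standard `logging.FileHandler` to the given path (signature and format are assumptions):

import logging

def get_logger(log_file):
    """Return a logger that writes both to log_file and to the console; format and level are assumed."""
    logger = logging.getLogger(log_file)
    if not logger.handlers:
        fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(fmt)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
        logger.addHandler(stream_handler)
        logger.setLevel(logging.INFO)
    return logger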
def build_model(opts, vocab_size=0, maxnum=50, maxlen=50, embedd_dim=50, embedding_weights=None, verbose=False, init_mean_value=None):
N = maxnum
L = maxlen
logger = get_logger("Build model")
logger.info("Model parameters: max_sentnum = %d, max_sentlen = %d, embedding dim = %s, lstm_units = %s, drop rate = %s, l2 = %s" % (N, L, embedd_dim,
opts.lstm_units, opts.dropout, opts.l2_value))
word_input = Input(shape=(N*L,), dtype='int32', name='word_input')
x = Embedding(output_dim=embedd_dim, input_dim=vocab_size, input_length=N*L, weights=embedding_weights, name='x')(word_input)
drop_x = Dropout(opts.dropout, name='drop_x')(x)
resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)
z = TimeDistributed(LSTM(opts.lstm_units, return_sequences=True), name='z')(resh_W)
avg_z = TimeDistributed(GlobalAveragePooling1D(), name='avg_z')(z)
hz = LSTM(opts.lstm_units, return_sequences=True, name='hz')(avg_z)
# TODO, random drop sentences
drop_hz = Dropout(opts.dropout, name='drop_hz')(hz)
avg_hz = GlobalAveragePooling1D(name='avg_hz')(drop_hz)
y = Dense(output_dim=1, activation='sigmoid', name='output')(avg_hz)
model = Model(input=word_input, output=y)
if opts.init_bias and init_mean_value:
logger.info("Initialise output layer bias with log(y_mean/1-y_mean)")
bias_value = (np.log(init_mean_value) - np.log(1 - init_mean_value)).astype(K.floatx())
model.layers[-1].b.set_value(bias_value)
if verbose:
model.summary()
start_time = time.time()
model.compile(loss='mse', optimizer='rmsprop')
total_time = time.time() - start_time
logger.info("Model compiled in %.4f s" % total_time)
return model
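A hedged usage sketch of `build_model`: the option names (`lstm_units`, `dropout`, `l2_value`, `init_bias`) follow the attributes read inside the function, but the concrete values and the `argparse.Namespace` wrapper are illustrative assumptions:

from argparse import Namespace
import numpy as np

opts = Namespace(lstm_units=100, dropout=0.5, l2_value=0.0, init_bias=True)
model = build_model(
    opts,
    vocab_size=4000,            # size of the word vocabulary (example value)
    maxnum=50, maxlen=50,       # sentences per document, words per sentence
    embedd_dim=50,
    embedding_weights=None,     # or [pretrained_matrix] with the Keras 1.x Embedding API
    verbose=True,
    init_mean_value=np.array([0.5]),  # assumed: mean training score as a 1-element array
)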
def build_bidirectional_model(opts, vocab_size=0, maxnum=50, maxlen=50, embedd_dim=50, embedding_weights=None, verbose=False, init_mean_value=None):
N = maxnum
L = maxlen
logger = get_logger("Build bidirectional model")
logger.info("Model parameters: max_sentnum = %d, max_sentlen = %d, embedding dim = %s, lstm_units = %s, drop rate = %s, l2 = %s" % (N, L, embedd_dim,
opts.lstm_units, opts.dropout, opts.l2_value))
word_input = Input(shape=(N*L,), dtype='int32', name='word_input')
x = Embedding(output_dim=embedd_dim, input_dim=vocab_size, input_length=N*L, weights=embedding_weights, name='x')(word_input)
drop_x = Dropout(opts.dropout, name='drop_x')(x)
resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)
z_fwd = TimeDistributed(LSTM(opts.lstm_units, return_sequences=True), name='z_fwd')(resh_W)
z_bwd = TimeDistributed(LSTM(opts.lstm_units, return_sequences=True, go_backwards=True), name='z_bwd')(resh_W)
z_merged = merge([z_fwd, z_bwd], mode='concat', name='z_merged')
avg_z = TimeDistributed(GlobalAveragePooling1D(), name='avg_z')(z_merged)
hz_fwd = LSTM(opts.lstm_units, return_sequences=True, name='hz_fwd')(avg_z)
hz_bwd = LSTM(opts.lstm_units, return_sequences=True, go_backwards=True, name='hz_bwd')(avg_z)
hz_merged = merge([hz_fwd, hz_bwd], mode='concat', name='hz_merged')
# avg_h = MeanOverTime(mask_zero=True, name='avg_h')(hz)
avg_hz = GlobalAveragePooling1D(name='avg_hz')(hz_merged)
y = Dense(output_dim=1, activation='sigmoid', name='output')(avg_hz)
model = Model(input=word_input, output=y)
if opts.init_bias and init_mean_value:
logger.info("Initialise output layer bias with log(y_mean/1-y_mean)")
bias_value = (np.log(init_mean_value) - np.log(1 - init_mean_value)).astype(K.floatx())
model.layers[-1].b.set_value(bias_value)
if verbose:
model.summary()
start_time = time.time()
model.compile(loss='mse', optimizer='rmsprop')
total_time = time.time() - start_time
logger.info("Model compiled in %.4f s" % total_time)
return model
def build_attention_model(opts, vocab_size=0, maxnum=50, maxlen=50, embedd_dim=50, embedding_weights=None, verbose=False, init_mean_value=None):
N = maxnum
L = maxlen
logger = get_logger('Build attention pooling model')
logger.info("Model parameters: max_sentnum = %d, max_sentlen = %d, embedding dim = %s, lstm_units = %s, drop rate = %s, l2 = %s" % (N, L, embedd_dim,
opts.lstm_units, opts.dropout, opts.l2_value))
word_input = Input(shape=(N*L,), dtype='int32', name='word_input')
x = Embedding(output_dim=embedd_dim, input_dim=vocab_size, input_length=N*L, weights=embedding_weights, name='x')(word_input)
drop_x = Dropout(opts.dropout, name='drop_x')(x)
resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)
z = TimeDistributed(LSTM(opts.lstm_units, return_sequences=True), name='z')(resh_W)
avg_z = TimeDistributed(GlobalAveragePooling1D(), name='avg_z')(z)
hz = LSTM(opts.lstm_units, return_sequences=True, name='hz')(avg_z)
# avg_h = MeanOverTime(mask_zero=True, name='avg_h')(hz)
# avg_hz = GlobalAveragePooling1D(name='avg_hz')(hz)
attent_hz = Attention(name='attent_hz')(hz)
y = Dense(output_dim=1, activation='sigmoid', name='output')(attent_hz)
model = Model(input=word_input, output=y)
if opts.init_bias and init_mean_value:
logger.info("Initialise output layer bias with log(y_mean/1-y_mean)")
bias_value = (np.log(init_mean_value) - np.log(1 - init_mean_value)).astype(K.floatx())
model.layers[-1].b.set_value(bias_value)
if verbose:
model.summary()
start_time = time.time()
model.compile(loss='mse', optimizer='rmsprop')
total_time = time.time() - start_time
logger.info("Model compiled in %.4f s" % total_time)
return model
def build_attention2_model(opts, vocab_size=0, maxnum=50, maxlen=50, embedd_dim=50, embedding_weights=None, verbose=False, init_mean_value=None):
N = maxnum
L = maxlen
logger = get_logger('Build attention pooling model')
logger.info("Model parameters: max_sentnum = %d, max_sentlen = %d, embedding dim = %s, lstm_units = %s, drop rate = %s, l2 = %s" % (N, L, embedd_dim,
opts.lstm_units, opts.dropout, opts.l2_value))
word_input = Input(shape=(N*L,), dtype='int32', name='word_input')
x = Embedding(output_dim=embedd_dim, input_dim=vocab_size, input_length=N*L, weights=embedding_weights, name='x')(word_input)
drop_x = Dropout(opts.dropout, name='drop_x')(x)
resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)
z = TimeDistributed(LSTM(opts.lstm_units, return_sequences=True), name='z')(resh_W)
att_z = TimeDistributed(Attention(name='att_z'))(z)
hz = LSTM(opts.lstm_units, return_sequences=True, name='hz')(att_z)
# avg_h = MeanOverTime(mask_zero=True, name='avg_h')(hz)
# avg_hz = GlobalAveragePooling1D(name='avg_hz')(hz)
attent_hz = Attention(name='attent_hz')(hz)
y = Dense(output_dim=1, activation='sigmoid', name='output')(attent_hz)
model = Model(input=word_input, output=y)
if opts.init_bias and init_mean_value:
logger.info("Initialise output layer bias with log(y_mean/1-y_mean)")
bias_value = (np.log(init_mean_value) - np.log(1 - init_mean_value)).astype(K.floatx())
model.layers[-1].b.set_value(bias_value)
if verbose:
model.summary()
start_time = time.time()
model.compile(loss='mse', optimizer='rmsprop')
total_time = time.time() - start_time
logger.info("Model compiled in %.4f s" % total_time)
return model
def __init__(self, name, keep_growing=True):
self.__name = name
self.instance2index = {}
self.instances = []
self.keep_growing = keep_growing
# Index 0 is reserved by default, most likely for unknown/padding tokens; real instances start at next_index = 1
self.default_index = 0
self.next_index = 1
self.logger = utils.get_logger('Alphabet')
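The constructor only sets up the bookkeeping. A hypothetical continuation showing how such an instance-to-index map is typically used; the method names `add` and `get_index` and their behaviour are assumptions for illustration, not taken from the source:

def add(self, instance):
    """Register a new instance if unseen; indices grow from next_index."""
    if instance not in self.instance2index:
        self.instances.append(instance)
        self.instance2index[instance] = self.next_index
        self.next_index += 1

def get_index(self, instance):
    """Return the index of an instance, falling back to default_index (unk) when the alphabet is frozen."""
    try:
        return self.instance2index[instance]
    except KeyError:
        if self.keep_growing:
            self.add(instance)
            return self.instance2index[instance]
        self.logger.warning('Unknown instance "%s", using default index.', instance)
        return self.default_index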