def get_named_set(lang_codes, feature_set):
if feature_set == 'id':
return get_id_set(lang_codes)
if feature_set not in FEATURE_SETS:
print("ERROR: Invalid feature set " + feature_set, file=sys.stderr)
sys.exit()
filename, source, prefix = FEATURE_SETS[feature_set]
feature_database = np.load(filename)
lang_codes = [get_language_code(l, feature_database) for l in lang_codes]
lang_indices = [get_language_index(l, feature_database) for l in lang_codes]
feature_names = get_feature_names(prefix, feature_database)
feature_indices = [get_feature_index(f, feature_database) for f in feature_names]
source_index = get_source_index(source, feature_database)
feature_values = feature_database["data"][lang_indices,:,:][:,feature_indices,:][:,:,source_index]
feature_values = feature_values.squeeze(axis=2)
return feature_names, feature_values
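A hedged usage sketch; the language codes and feature-set key below are hypothetical, since valid values depend on the FEATURE_SETS table and the feature database loaded elsewhere in the module:
# Hypothetical call: fetch typological feature names/values for two languages.
feature_names, feature_values = get_named_set(['eng', 'fra'], 'syntax_wals')
print(len(feature_names), feature_values.shape, file=sys.stderr)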
Python sys.stderr usage examples (source snippets)
def convert_image(inpath, outpath, size):
"""Convert an image file using `sips`.
Args:
inpath (str): Path of source file.
outpath (str): Path to destination file.
size (int): Width and height of destination image in pixels.
Raises:
RuntimeError: Raised if `sips` exits with non-zero status.
"""
cmd = [
'sips',
'-z', str(size), str(size),
inpath,
'--out', outpath]
# log().debug(cmd)
with open(os.devnull, 'w') as pipe:
retcode = subprocess.call(cmd, stdout=pipe, stderr=subprocess.STDOUT)
if retcode != 0:
raise RuntimeError('sips exited with {0}'.format(retcode))
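A minimal usage sketch, assuming macOS with `sips` on the PATH, `sys` imported, and hypothetical file paths:
try:
    convert_image('icon_source.png', 'icon_128.png', 128)
except RuntimeError as err:
    # sips returned a non-zero status; report and move on.
    print(err, file=sys.stderr)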
def print_exception(etype, value, tb, limit=None, file=None):
"""Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
This differs from print_tb() in the following ways: (1) if
traceback is not None, it prints a header "Traceback (most recent
call last):"; (2) it prints the exception type and value after the
stack trace; (3) if type is SyntaxError and value has the
appropriate format, it prints the line where the syntax error
occurred with a caret on the next line indicating the approximate
position of the error.
"""
if file is None:
file = sys.stderr
if tb:
_print(file, 'Traceback (most recent call last):')
print_tb(tb, limit, file)
lines = format_exception_only(etype, value)
for line in lines:
_print(file, line, '')
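A usage sketch, assuming the traceback-module helpers this function relies on (print_tb, format_exception_only, _print) are in scope:
try:
    int('not a number')
except ValueError:
    # Report the caught exception to stderr without re-raising it.
    etype, value, tb = sys.exc_info()
    print_exception(etype, value, tb, limit=5, file=sys.stderr)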
def build(self, file):
if self.built:
raise PermissionError("You cannot build multiple times!")
if not self.loaded:
self.load(file)
old = os.getcwd()
sys.path.append(os.path.dirname(os.path.abspath(file))) # for module import that aren't "include" call
try:
content = open(file, "rb").read()
os.chdir(os.path.dirname(os.path.abspath(file))) # set the current working directory, for open() etc.
exec(compile(content, file, 'exec'), self.user_functions)
except Exception as err:
print("An exception occured while building: ", file=sys.stderr)
lines = traceback.format_exc(None, err).splitlines()
print(" " + lines[-1], file=sys.stderr)
for l in lines[3:-1]:
print(l, file=sys.stderr)
exit(1)
os.chdir(old)
sys.path.remove(os.path.dirname(os.path.abspath(file)))
self.built = True
def load(self, file):
if self.loaded:
return
sys.path.append(os.path.dirname(os.path.abspath(file))) # for module import that aren't "include" call
old = os.getcwd()
try:
content = open(file, "rb").read()
os.chdir(os.path.dirname(os.path.abspath(file))) # set the current working directory, for open() etc.
exec(compile(content, file, 'exec'), self.user_functions)
except Exception as err:
print("An exception occured while loading: ", file=sys.stderr)
lines = traceback.format_exc(None, err).splitlines()
print(" " + lines[-1], file=sys.stderr)
for l in lines[3:-1]:
print(l, file=sys.stderr)
exit(1)
os.chdir(old)
sys.path.remove(os.path.dirname(os.path.abspath(file)))
self.loaded = True
self.mem_offset = 0
def log(message, level=None):
"""Write a message to the juju log"""
command = ['juju-log']
if level:
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
subprocess.call(command)
except OSError as e:
if e.errno == errno.ENOENT:
if level:
message = "{}: {}".format(level, message)
message = "juju-log: {}".format(message)
print(message, file=sys.stderr)
else:
raise
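A brief usage sketch; inside a real charm hook juju-log is available, while in unit tests the fallback path above writes to stderr:
log("config-changed hook starting")
log({"unit": "mysql/0", "state": "blocked"}, level="WARNING")  # non-string messages are repr()'d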
def generate2():
"""
Call an external Python 2 program to retrieve the AST symbols of that
language version
:return:
"""
import subprocess as sp
import tempfile, shutil, sys, traceback
tempdir = tempfile.mkdtemp()
tmp_script = os.path.join(tempdir, "py2_ast_code.py")
py2_proc_out = ""
try:
with open(tmp_script, 'w') as py2code:
py2code.write(generate_str + WRITESYMS_CODE)
py2_proc_out = sp.check_output(["python2", tmp_script]).decode()
finally:
try:
shutil.rmtree(tempdir)
except:
print("Warning: error trying to delete the temporal directory:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return set(py2_proc_out.splitlines())
def check_response(self, response):
if type(response) is not dict:
self.error('Bad response : ' + str(response))
status = response.get('response_code', -1)
if status == 204:
self.error('VirusTotal api rate limit exceeded (Status 204).')
if status != 200:
self.error('Bad status : ' + str(status))
results = response.get('results', {})
if 'verbose_msg' in results:
print >> sys.stderr, str(results.get('verbose_msg'))
return results
# 0 => not found
# -2 => in queue
# 1 => ready
def _get_embedding_layer(self, embedding_file=None):
if self.embedding_layer is None:
if embedding_file is None:
if not self.tune_embedding:
print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
self.tune_embedding = True
embedding = None
else:
# Put the embedding in a list for Keras to treat it as the initial weights of the embedding
# layer.
embedding = [self.data_processor.get_embedding_matrix(embedding_file, onto_aware=False)]
vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
self.embedding_layer = Embedding(input_dim=vocab_size, output_dim=self.embed_dim,
weights=embedding, trainable=self.tune_embedding,
mask_zero=True, name="embedding")
return self.embedding_layer
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
input_dim = input_shape[-1]
reader_input_shape = self.get_reader_input_shape(input_shape)
print >>sys.stderr, "NSE reader input shape:", reader_input_shape
writer_input_shape = (input_shape[0], 1, self.output_dim * 2) # Will process one timestep at a time
print >>sys.stderr, "NSE writer input shape:", writer_input_shape
composer_input_shape = self.get_composer_input_shape(input_shape)
print >>sys.stderr, "NSE composer input shape:", composer_input_shape
self.reader.build(reader_input_shape)
self.writer.build(writer_input_shape)
self.composer.build(composer_input_shape)
# Aggregate weights of individual components for this layer.
reader_weights = self.reader.trainable_weights
writer_weights = self.writer.trainable_weights
composer_weights = self.composer.trainable_weights
self.trainable_weights = reader_weights + writer_weights + composer_weights
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def process_test_data(self, input_file, onto_aware, is_labeled=True):
if not self.model:
raise RuntimeError, "Model not trained yet!"
print >>sys.stderr, "Reading test data"
label_ind = []
tagged_sentences = []
for line in open(input_file):
lnstrp = line.strip()
if is_labeled:
label, tagged_sentence = lnstrp.split("\t")
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_ind.append(self.label_map[label])
else:
tagged_sentence = lnstrp
tagged_sentences.append(tagged_sentence)
print >>sys.stderr, "Indexing test data"
# Infer max sentence length if the model is trained
input_shape = self.model.get_input_shape_at(0)[0] # take the shape of the first of two inputs at 0.
sentlenlimit = input_shape[1] # (num_sentences, num_words, num_senses, num_hyps)
test_inputs = self.data_processor.prepare_paired_input(tagged_sentences, onto_aware=onto_aware,
sentlenlimit=sentlenlimit, for_test=True)
test_labels = self.data_processor.make_one_hot(label_ind)
return test_inputs, test_labels
def _factor_target_indices(self, Y_inds, vocab_size=None, base=2):
if vocab_size is None:
vocab_size = len(self.dp.word_index)
print >>sys.stderr, "Factoring targets of vocabulary size: %d"%(vocab_size)
num_vecs = int(math.ceil(math.log(vocab_size)/math.log(base))) + 1
base_inds = []
div_Y_inds = Y_inds
print >>sys.stderr, "Number of factors: %d"%num_vecs
for i in range(num_vecs):
new_inds = div_Y_inds % base
if i == num_vecs - 1:
if new_inds.sum() == 0:
# Most significant "digit" is a zero. Omit it.
break
base_inds.append(new_inds)
div_Y_inds = numpy.copy(div_Y_inds/base)
base_vecs = [self._make_one_hot(base_inds_i, base) for base_inds_i in base_inds]
return base_vecs
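To make the factoring concrete, here is a standalone numpy sketch of the same base-digit decomposition (toy values; not the class method itself): each target index is split into ceil(log_base(vocab_size)) + 1 digits, least significant first, and each digit vector is then one-hot encoded.
import math
import numpy

Y_inds = numpy.array([0, 5, 12, 7])   # toy target indices
vocab_size, base = 16, 2
num_vecs = int(math.ceil(math.log(vocab_size) / math.log(base))) + 1  # 5 factors
digits = []
div = Y_inds
for _ in range(num_vecs):
    digits.append(div % base)          # least significant digit first
    div = div // base
# e.g. index 5 -> digits [1, 0, 1, 0, 0] (binary 101, read LSB first)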
def get_attention(self, C_ind):
if not self.model:
raise RuntimeError, "Model not trained!"
model_embedding = None
model_lstm = None
for layer in self.model.layers:
if layer.name.lower() == "embedding":
model_embedding = layer
if layer.name.lower() == "sent_lstm":
model_lstm = layer
if model_embedding is None or model_lstm is None:
raise RuntimeError, "Did not find expected layers"
lstm_weights = model_lstm.get_weights()
embedding_weights = model_embedding.get_weights()
embed_in_dim, embed_out_dim = embedding_weights[0].shape
att_embedding = HigherOrderEmbedding(input_dim=embed_in_dim, output_dim=embed_out_dim, weights=embedding_weights)
onto_lstm = OntoAttentionLSTM(input_dim=embed_out_dim, output_dim=embed_out_dim, input_length=model_lstm.input_length, num_senses=self.num_senses, num_hyps=self.num_hyps, use_attention=True, return_attention=True, weights=lstm_weights)
att_input = Input(shape=C_ind.shape[1:], dtype='int32')
att_sent_rep = att_embedding(att_input)
att_output = onto_lstm(att_sent_rep)
att_model = Model(input=att_input, output=att_output)
att_model.compile(optimizer='adam', loss='mse') # optimizer and loss are not needed since we are not going to train this model.
C_att = att_model.predict(C_ind)
print >>sys.stderr, "Got attention values. Input, output shapes:", C_ind.shape, C_att.shape
return C_att
def read_preposition_senses(self):
num_senses_per_prep = []
for filename in os.listdir(self.prep_senses_dir):
if '.defs.xml' in filename:
prep_str = filename.replace('.defs.xml', '')
xml_root = ElementTree.parse("%s/%s" % (self.prep_senses_dir, filename)).getroot()
senses = []
for child_el in xml_root.getchildren():
sense_id = child_el.findtext('senseid')
if sense_id is not None:
# This will add strings like 'into-1(1)'
senses.append("%s-%s" % (prep_str, sense_id))
num_senses_per_prep.append(len(senses))
self.prep_senses[prep_str] = senses
num_preps = len(self.prep_senses)
print >>sys.stderr, "Read senses for %d prepositions." % num_preps
print >>sys.stderr, "Senses per preposition: %f" % (float(sum(num_senses_per_prep))/num_preps)
# TODO: Take a coarse-grained mapping file and implement the following function.
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
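For context, a minimal sketch of the kind of WSGI application callable that would consume this environ dict (illustrative only; not part of the original server):
def simple_app(environ, start_response):
    # Echo the request line; runtime errors would normally go to environ['wsgi.errors'].
    body = "{0} {1}\n".format(environ['REQUEST_METHOD'], environ['PATH_INFO'])
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [body.encode('utf-8')]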
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
Source: libmilter.py, from project sipxecs-voicemail-transcription (author: andrewsauder)
def debug(msg, level=1, protId=0):
if not DEBUG: return
if level <= DEBUG:
out = '[%s] DEBUG: ' % time.strftime('%H:%M:%S')
if protId:
out += 'ID: %d ; ' % protId
out += msg
print(out, file=sys.stderr)
# }}}
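A usage sketch, assuming the module-level DEBUG constant is set to 2 or higher and `time` is imported:
debug('accepted connection from MTA', level=2, protId=7)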
# Response Constants {{{
#
# Constants for responses back to the MTA. You should use these actions
# at the end of each callback. If none of these are specified,
# CONTINUE is used as the default
#
def create_smcog_hit(cur, feature, gene_id):
'''Create an smCOG hit entry'''
try:
smcog_name, smcog_score, smcog_evalue = parse_smcog(feature)
smcog_score = float(smcog_score)
smcog_evalue = float(smcog_evalue)
smcog_id = get_smcog_id(cur, smcog_name)
cur.execute("SELECT gene_id, smcog_id FROM antismash.smcog_hits WHERE smcog_id = %s AND gene_id = %s", (smcog_id, gene_id))
ret = cur.fetchone()
if ret is None:
cur.execute("INSERT INTO antismash.smcog_hits (score, evalue, smcog_id, gene_id) VALUES (%s, %s, %s, %s)", (smcog_score, smcog_evalue, smcog_id, gene_id))
except ValueError as e:
# no smcog qualifier is an expected error, don't log that
err_msg = str(e)
if not (err_msg.startswith('No smcog qualifier') or
err_msg.startswith('No note qualifier')):
print(e, file=sys.stderr)
Source: download_all_files_with_extension.py, from project cbapi-examples (author: cbcommunity)
def main(cb, args):
username = args.get("username")
password = args.get("password")
output = args.get("output")
extensions = args.get("extensions").split(",")
listener = ExtensionFileWatcherAndGrabber(args.get('server_url'), cb, username, password, extensions, output)
try:
print "Extension File Watcher and Grabber -- started. Watching for:", extensions
listener.process()
except KeyboardInterrupt:
print >> sys.stderr, "Caught Ctrl-C"
listener.stop()
print "Extension File Watcher and Grabber -- stopped."
def test_main(args):
""" """
parser = argparse.ArgumentParser(description=description())
parser.add_argument('-f',
'--image_file',
default='../examples/GodRoss.jpg',
type=str,
help='The file path of the image to test')
parser.add_argument('test_name',
type=str,
help='The name of the test to perform')
args = parser.parse_args(args)
try:
if args.test_name == "test_many_random":
test_many_random(args.image_file, 5, 5)
elif args.test_name == "test_multi_origin":
test_multi_origin(args.image_file, 4)
else: print("Error: Test function {} doesn't exist".format(args.test_name),
file=sys.stderr)
except OSError:
print("Error: File: {} doesn't exist".format(args.image_file),
file=sys.stderr)
def notify(self, *args, **kwargs):
"""
Call all listener callbacks. The wrapped function is not executed.
"""
for listener in self.nm_listeners:
# Ensure that all callbacks get called, and no exceptions escape to
# the caller.
try:
listener(*args, **kwargs)
except:
# If the target is a notify_callback--which should always be
# the case as long as the proper API is used--show the actual
# function
target = getattr(listener, 'nc_func', listener)
print >>sys.stderr, 'Exception in notification %s:' % (repr(target),)
traceback.print_exception(*sys.exc_info())
def redirectSTDOUT(filename):
if _DEBUG == True:
print "redirectSTDOUT(): redirecting stdout/stderr to filename " + str(filename)
if type(filename) == str:
dirname = os.path.dirname(filename)
if len(dirname) == 0 or \
(len(dirname) > 0 and os.path.isdir(dirname)):
try:
f = open(filename,'w')
# Send stdout and stderr to provided filename
sys.stdout = f
sys.stderr = f
except Exception, e:
print "redirectSTDOUT(): ERROR - Unable to open file " + str(filename) + " for writing stdout and stderr " + str(e)
elif type(filename) == cStringIO.OutputType:
sys.stdout = filename
sys.stderr = filename
else:
print 'redirectSTDOUT(): failed to redirect stdout/stderr to ' + str(filename)
print 'redirectSTDOUT(): argument must be: string filename, cStringIO.StringIO object'
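A Python 2 usage sketch: capture output in an in-memory buffer, then restore the real streams:
import cStringIO
buf = cStringIO.StringIO()
redirectSTDOUT(buf)
print "this line is captured in buf"
# Restore the original streams and read back what was captured.
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
captured = buf.getvalue()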
def _install_modules(command_table):
for cmd in command_table:
command_table[cmd].load_arguments()
try:
mods_ns_pkg = import_module('azure.cli.command_modules')
installed_command_modules = [modname for _, modname, _ in
pkgutil.iter_modules(mods_ns_pkg.__path__)
if modname not in BLACKLISTED_MODS]
except ImportError:
installed_command_modules = []
for mod in installed_command_modules:
try:
mod = import_module('azure.cli.command_modules.' + mod)
mod.load_params(mod)
mod.load_commands()
except Exception: # pylint: disable=broad-except
print("Error loading: {}".format(mod), file=stderr)
traceback.print_exc(file=stderr)
_update_command_definitions(command_table)
def _run(self):
with tf.Session() as session:
self.io.restore_session(session)
inputs = sys.stdin
singsen = SingleSentenceData()
scounter = SpeedCounter().start()
while True:
senlen = singsen.read_from_file(inputs, self.io.w2id)
if senlen is None:
break
if senlen < 2:
print(-9999)
continue
o = run_epoch(session, self.test_model, singsen)
scounter.next()
if self.params.progress and scounter.val % 20 == 0:
print("\rLoglikes per secs: %f" % scounter.speed, end="", file=sys.stderr)
print("%f" % o)
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def s_unload(self, *args):
"""Unload the module.
Removes it from the restricted environment's sys.modules dictionary.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
Similar to the r_unload() method, but has access to restricted
versions of the standard I/O streams sys.stdin, sys.stderr, and
sys.stdout.
"""
return self.s_apply(self.r_unload, args)
# Restricted open(...)
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
response code received when the all data is sent.
"""
self.putcmd("data")
(code, repl) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "data:", (code, repl)
if code != 354:
raise SMTPDataError(code, repl)
else:
q = quotedata(msg)
if q[-2:] != CRLF:
q = q + CRLF
q = q + "." + CRLF
self.send(q)
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "data:", (code, msg)
return (code, msg)
def connect(self, host='localhost', port=0):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
return SMTP.connect(self, host, port)
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(host)
except socket.error, msg:
if self.debuglevel > 0:
print>>stderr, 'connect fail:', host
if self.sock:
self.sock.close()
self.sock = None
raise socket.error, msg
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "connect:", msg
return (code, msg)
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.