def _raw_input(prompt="", stream=None, input=None):
    # A raw_input() replacement that doesn't save the string in the
    # GNU readline history.
    if not stream:
        stream = sys.stderr
    if not input:
        input = sys.stdin
    prompt = str(prompt)
    if prompt:
        stream.write(prompt)
        stream.flush()
    # NOTE: The Python C API calls flockfile() (and unlock) during readline.
    line = input.readline()
    if not line:
        raise EOFError
    if line[-1] == '\n':
        line = line[:-1]
    return line
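A minimal usage sketch (my own, Python 3 syntax): the prompt goes to stderr while the reply is read from the supplied input stream, so it never enters readline history. The StringIO stand-in for stdin is illustrative.

import io
import sys

# Prompt on stderr, read from a fake stdin; the trailing newline is stripped.
reply = _raw_input("Password: ", stream=sys.stderr, input=io.StringIO("hunter2\n"))
assert reply == "hunter2"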
def handleError(self, record):
    """
    Handle errors which occur during an emit() call.

    This method should be called from handlers when an exception is
    encountered during an emit() call. If raiseExceptions is false,
    exceptions get silently ignored. This is what is mostly wanted
    for a logging system - most users will not care about errors in
    the logging system, they are more interested in application errors.
    You could, however, replace this with a custom handler if you wish.
    The record which was being processed is passed in to this method.
    """
    if raiseExceptions:
        ei = sys.exc_info()
        try:
            traceback.print_exception(ei[0], ei[1], ei[2],
                                      None, sys.stderr)
            sys.stderr.write('Logged from file %s, line %s\n' % (
                record.filename, record.lineno))
        except IOError:
            pass  # see issue 5971
        finally:
            del ei
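A usage note on the flag that gates this method: raiseExceptions is a module-level switch in the stdlib logging package, so handler errors can be silenced globally.

import logging

# Handler errors are then swallowed by handleError() instead of
# producing tracebacks on stderr; common in production deployments.
logging.raiseExceptions = False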
def callHandlers(self, record):
    """
    Pass a record to all relevant handlers.

    Loop through all handlers for this logger and its parents in the
    logger hierarchy. If no handler was found, output a one-off error
    message to sys.stderr. Stop searching up the hierarchy whenever a
    logger with the "propagate" attribute set to zero is found - that
    will be the last logger whose handlers are called.
    """
    c = self
    found = 0
    while c:
        for hdlr in c.handlers:
            found = found + 1
            if record.levelno >= hdlr.level:
                hdlr.handle(record)
        if not c.propagate:
            c = None  # break out
        else:
            c = c.parent
    if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
        sys.stderr.write("No handlers could be found for logger"
                         " \"%s\"\n" % self.name)
        self.manager.emittedNoHandlerWarning = 1
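A small sketch of the no-handler path, assuming Python 2's logging module (where this method comes from); Python 3 instead routes such records to a lastResort handler.

import logging

orphan = logging.getLogger("example.nohandler")  # no handlers in its hierarchy
orphan.warning("first emit")   # Python 2: one-off "No handlers could be found..." on stderr
orphan.warning("second emit")  # silent; manager.emittedNoHandlerWarning is now set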
def __init__(self, group=None, target=None, name=None,
             args=(), kwargs=None, verbose=None):
    assert group is None, "group argument must be None for now"
    _Verbose.__init__(self, verbose)
    if kwargs is None:
        kwargs = {}
    self.__target = target
    self.__name = str(name or _newname())
    self.__args = args
    self.__kwargs = kwargs
    self.__daemonic = self._set_daemon()
    self.__ident = None
    self.__started = Event()
    self.__stopped = False
    self.__block = Condition(Lock())
    self.__initialized = True
    # sys.stderr is not stored in the class like
    # sys.exc_info since it can be changed between instances
    self.__stderr = _sys.stderr
def _run_exitfuncs():
    """run any registered exit functions

    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """
    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except SystemExit:
            exc_info = sys.exc_info()
        except:
            import traceback
            print >> sys.stderr, "Error in atexit._run_exitfuncs:"
            traceback.print_exc()
            exc_info = sys.exc_info()

    if exc_info is not None:
        raise exc_info[0], exc_info[1], exc_info[2]
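The last-in, first-out ordering implemented above can be observed through the public atexit API (Python 3 syntax here):

import atexit

atexit.register(lambda: print("registered first, runs last"))
atexit.register(lambda: print("registered last, runs first"))
# On interpreter exit, the handlers fire in reverse registration order.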
def test():
    """Small test program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'""" % sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout)
    else:
        func(sys.stdin, sys.stdout)
def __init__(self, counts=None, calledfuncs=None, infile=None,
             callers=None, outfile=None):
    self.counts = counts
    if self.counts is None:
        self.counts = {}
    self.counter = self.counts.copy()  # map (filename, lineno) to count
    self.calledfuncs = calledfuncs
    if self.calledfuncs is None:
        self.calledfuncs = {}
    self.calledfuncs = self.calledfuncs.copy()
    self.callers = callers
    if self.callers is None:
        self.callers = {}
    self.callers = self.callers.copy()
    self.infile = infile
    self.outfile = outfile
    if self.infile:
        # Try to merge existing counts file.
        try:
            counts, calledfuncs, callers = \
                pickle.load(open(self.infile, 'rb'))
            self.update(self.__class__(counts, calledfuncs, callers))
        except (IOError, EOFError, ValueError), err:
            print >> sys.stderr, ("Skipping counts file %r: %s"
                                  % (self.infile, err))
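A hedged sketch of the counts file the constructor above tries to merge: a pickled (counts, calledfuncs, callers) triple. Python 3 syntax; the file name is illustrative.

import pickle

counts = {("script.py", 12): 3}  # (filename, lineno) -> execution count
with open("trace.counts", "wb") as f:
    pickle.dump((counts, {}, {}), f)
with open("trace.counts", "rb") as f:
    counts, calledfuncs, callers = pickle.load(f)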
# download_all_files_with_extension.py (project: cbapi-python, author: carbonblack)
def main(cb, args):
    username = args.get("username")
    password = args.get("password")
    output = args.get("output")
    extensions = args.get("extensions").split(",")
    listener = ExtensionFileWatcherAndGrabber(args.get('server_url'), cb, username,
                                              password, extensions, output)
    try:
        print "Extension File Watcher and Grabber -- started. Watching for:", extensions
        listener.process()
    except KeyboardInterrupt:
        print >> sys.stderr, "Caught Ctrl-C"
        listener.stop()
    print "Extension File Watcher and Grabber -- stopped."
def add_to_vcs(self, summary):
    if (
        self.git_add and
        (SyncStatus.DELETED in summary or SyncStatus.ADDED in summary) and
        not self.dry_run and
        self.confirm(
            question=(
                'Do you want to add created and removed files to GIT?'
            )
        )
    ):
        output, errors = subprocess.Popen(
            ['git', '-C', app_settings.SYNC_DIRECTORY,
             'add', '-A', app_settings.SYNC_DIRECTORY],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ).communicate()
        if errors:
            raise self.error('Adding file changes to GIT failed!')
def reporterrors(self, job, jobres):
    # type: (ExeCall, ExeResult) -> None
    if not self.should_report_error(job, jobres):
        return
    category = INFO_PROCERRORS
    if jobres.error is not None:
        iprint(category, red("Error: calling %s caused this error: %s" % (job.exe,
                                                                          jobres.error)))
    else:
        iprint(category, red("Error: %s returned code %s" % (job.exe, jobres.returncode)))
    iprint(category, " for these arguments: %s" % colored_cmdargs(job.cmdargs, RED))
    if jobres.stderr:
        text = jobres.stderr
        try:
            text = unistr(text)
        except UnicodeDecodeError:
            pass
        iprint(INFO_PROCERRORS, 'formatter stderr:"""\\\n%s"""' % red(text))
def debug(self, fh=sys.stderr):
    self.cursor.execute('select * from kv')
    pprint.pprint(self.cursor.fetchall(), stream=fh)
    self.cursor.execute('select * from kv_revisions')
    pprint.pprint(self.cursor.fetchall(), stream=fh)
def get_language_code(lang_code, feature_database):
    # first, normalize to an ISO 639-3 code
    if lang_code in LETTER_CODES:
        lang_code = LETTER_CODES[lang_code]
    if lang_code not in feature_database["langs"]:
        print("ERROR: Language " + lang_code + " not found.", file=sys.stderr)
        sys.exit(2)
    return lang_code
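An illustrative call, assuming LETTER_CODES maps two-letter codes to ISO 639-3 and the feature database lists known languages (both values below are stand-ins):

LETTER_CODES = {"en": "eng"}
feature_database = {"langs": ["eng", "deu"]}
assert get_language_code("en", feature_database) == "eng"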
def __init__(self):
    self.__set_encoding()
    # Prepare in/out/err streams
    self.fperror = sys.stderr
    self.fpinput = sys.stdin
    self.fpoutput = sys.stdout
    # Load input
    self.__input = json.load(self.fpinput)
    # Set parameters
    self.data_type = self.get_param('dataType', None, 'Missing dataType field')
    self.tlp = self.get_param('tlp', 2)
    self.enable_check_tlp = self.get_param('config.check_tlp', False)
    self.max_tlp = self.get_param('config.max_tlp', 2)
    # Set proxy configuration if available
    self.http_proxy = self.get_param('config.proxy.http')
    self.https_proxy = self.get_param('config.proxy.https')
    self.__set_proxies()
    # Finally run check tlp
    if not self.__check_tlp():
        self.error('TLP is higher than allowed.')
    # Not breaking compatibility
    self.artifact = self.__input
    # Check for auto extraction config
    self.auto_extract = self.get_param('config.auto_extract', True)
def __set_encoding(self):
    try:
        if sys.stdout.encoding != 'UTF-8':
            if sys.version_info[0] == 3:
                sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
            else:
                sys.stdout = codecs.getwriter('utf-8')(sys.stdout, 'strict')
        if sys.stderr.encoding != 'UTF-8':
            if sys.version_info[0] == 3:
                sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')
            else:
                sys.stderr = codecs.getwriter('utf-8')(sys.stderr, 'strict')
    except:
        pass
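On Python 3.7+ the same effect is available without re-wrapping the streams; a minimal alternative sketch:

import sys

# TextIOWrapper.reconfigure changes the encoding of the live stream in place.
if sys.stderr.encoding.lower() != "utf-8":
    sys.stderr.reconfigure(encoding="utf-8")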
def failureMessage(message):
    """Display a failure message on sys.stderr."""
    printLine(message, "\n", sys.stderr)
def CheckForUpdates(fileServerPort, debug):
    """
    Check for updates.

    Channel options are stable, beta & alpha.
    Patches are only created & applied on the stable channel.
    """
    assert CLIENT_CONFIG.PUBLIC_KEY is not None
    client = Client(CLIENT_CONFIG, refresh=True)
    appUpdate = client.update_check(CLIENT_CONFIG.APP_NAME,
                                    wxupdatedemo.__version__,
                                    channel='stable')
    if appUpdate:
        if hasattr(sys, "frozen"):
            downloaded = appUpdate.download()
            if downloaded:
                status = UpdateStatus.EXTRACTING_UPDATE_AND_RESTARTING
                if 'WXUPDATEDEMO_TESTING_FROZEN' in os.environ:
                    sys.stderr.write("Exiting with status: %s\n"
                                     % UPDATE_STATUS_STR[status])
                    ShutDownFileServer(fileServerPort)
                    sys.exit(0)
                ShutDownFileServer(fileServerPort)
                if debug:
                    logger.debug('Extracting update and restarting...')
                    time.sleep(10)
                appUpdate.extract_restart()
            else:
                status = UpdateStatus.UPDATE_DOWNLOAD_FAILED
        else:
            status = UpdateStatus.UPDATE_AVAILABLE_BUT_APP_NOT_FROZEN
    else:
        status = UpdateStatus.NO_AVAILABLE_UPDATES
    return status
def Run(argv, clientConfig=None):
    """
    The main entry point.
    """
    args = ParseArgs(argv)
    if args.version:
        DisplayVersionAndExit()
    InitializeLogging(args.debug)
    fileServerDir = os.environ.get('PYUPDATER_FILESERVER_DIR')
    fileServerPort = StartFileServer(fileServerDir)
    if fileServerPort:
        UpdatePyUpdaterClientConfig(clientConfig, fileServerPort)
        status = CheckForUpdates(fileServerPort, args.debug)
    else:
        status = UpdateStatus.COULDNT_CHECK_FOR_UPDATES
    if 'WXUPDATEDEMO_TESTING_FROZEN' in os.environ:
        sys.stderr.write("Exiting with status: %s\n"
                         % UPDATE_STATUS_STR[status])
        ShutDownFileServer(fileServerPort)
        sys.exit(0)
    mainLoop = (argv[0] != 'RunTester')
    if 'WXUPDATEDEMO_TESTING_FROZEN' not in os.environ:
        return PyUpdaterWxDemoApp.Run(
            fileServerPort, UPDATE_STATUS_STR[status], mainLoop)
    else:
        return None
def _get_embedding_layer(self, embedding_file=None):
    if self.embedding_layer is None:
        word_vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
        synset_vocab_size = self.data_processor.get_vocab_size(onto_aware=True)
        if embedding_file is None:
            if not self.tune_embedding:
                print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
                self.tune_embedding = True
            embedding_weights = None
        else:
            # TODO: Other sources for prior initialization
            embedding = self.data_processor.get_embedding_matrix(embedding_file, onto_aware=True)
            # Put the embedding in a list for Keras to treat it as weights of the embedding layer.
            embedding_weights = [embedding]
        if self.set_sense_priors:
            initial_sense_prior_parameters = numpy.random.uniform(low=0.01, high=0.99,
                                                                  size=(word_vocab_size, 1))
            # While setting weights, Keras wants trainable weights first, and then the non-trainable
            # weights. If we are not tuning the embedding, we need to keep the sense priors first.
            if not self.tune_embedding:
                embedding_weights = [initial_sense_prior_parameters] + embedding_weights
            else:
                embedding_weights.append(initial_sense_prior_parameters)
        self.embedding_layer = OntoAwareEmbedding(word_vocab_size, synset_vocab_size, self.embed_dim,
                                                  weights=embedding_weights, mask_zero=True,
                                                  set_sense_priors=self.set_sense_priors,
                                                  tune_embedding=self.tune_embedding,
                                                  name="embedding")
    return self.embedding_layer
def process_data(self, input_file, onto_aware, for_test=False):
    '''
    Reads an input file and makes input for training or testing.
    '''
    dataset_type = "test" if for_test else "training"
    print >>sys.stderr, "Reading %s data" % dataset_type
    label_ind = []
    tagged_sentences = []
    max_sentence_length = 0
    all_sentence_lengths = []
    for line in open(input_file):
        lnstrp = line.strip()
        label, tagged_sentence = lnstrp.split("\t")
        sentence_length = len(tagged_sentence.split())
        all_sentence_lengths.append(sentence_length)
        if sentence_length > max_sentence_length:
            max_sentence_length = sentence_length
        label_ind.append(int(label))
        tagged_sentences.append(tagged_sentence)
    if for_test:
        if not self.model:
            raise RuntimeError("Model not trained yet!")
        input_shape = self.model.get_input_shape_at(0)  # (num_sentences, num_words, ...)
        sentlenlimit = input_shape[1]
    else:
        sentlenlimit = max_sentence_length
    # We need to readjust the labels because padding would affect the sentence indices.
    for i in range(len(label_ind)):
        length = all_sentence_lengths[i]
        label_ind[i] += sentlenlimit - length
    if not for_test:
        # Shuffling so that when Keras does validation split, it is not always at the end.
        sentences_and_labels = zip(tagged_sentences, label_ind)
        random.shuffle(sentences_and_labels)
        tagged_sentences, label_ind = zip(*sentences_and_labels)
    print >>sys.stderr, "Indexing %s data" % dataset_type
    inputs = self.data_processor.prepare_input(tagged_sentences, onto_aware=onto_aware,
                                               sentlenlimit=sentlenlimit, for_test=for_test,
                                               remove_singletons=False)
    labels = self.data_processor.make_one_hot(label_ind)
    return inputs, labels
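A worked example of the label readjustment above: with left padding up to sentlenlimit, a word index shifts right by the amount of padding added. The numbers below are illustrative.

sentlenlimit, length, label = 8, 5, 2
padded_label = label + (sentlenlimit - length)
assert padded_label == 5  # index 2 in a 5-word sentence becomes 5 after padding to 8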
def define_attention_model(self):
    '''
    Take necessary parts out of the model to get OntoLSTM attention.
    '''
    if not self.model:
        raise RuntimeError("Model not trained yet!")
    input_shape = self.model.get_input_shape_at(0)
    input_layer = Input(input_shape[1:], dtype='int32')  # removing batch size
    embedding_layer = None
    encoder_layer = None
    for layer in self.model.layers:
        if layer.name == "embedding":
            embedding_layer = layer
        elif layer.name == "onto_lstm":
            # We need to redefine the OntoLSTM layer with the learned weights and set return attention to True.
            # Assuming we'll want attention values for all words (return_sequences = True)
            if isinstance(layer, Bidirectional):
                onto_lstm = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
                                              num_senses=self.num_senses, num_hyps=self.num_hyps,
                                              use_attention=True, return_attention=True,
                                              return_sequences=True, consume_less='gpu')
                encoder_layer = Bidirectional(onto_lstm, weights=layer.get_weights())
            else:
                encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim,
                                                  output_dim=self.embed_dim, num_senses=self.num_senses,
                                                  num_hyps=self.num_hyps, use_attention=True,
                                                  return_attention=True, return_sequences=True,
                                                  consume_less='gpu', weights=layer.get_weights())
            break
    if not embedding_layer or not encoder_layer:
        raise RuntimeError("Required layers not found!")
    attention_output = encoder_layer(embedding_layer(input_layer))
    self.attention_model = Model(inputs=input_layer, outputs=attention_output)
    print >>sys.stderr, "Attention model summary:"
    self.attention_model.summary()
    self.attention_model.compile(loss="mse", optimizer="sgd")  # Loss and optimizer do not matter!