def __init__(self, manager, config, timer, base_dir, backup_dir, tailed_oplogs, backup_oplogs):
    super(Resolver, self).__init__(self.__class__.__name__, manager, config, timer, base_dir, backup_dir)
    self.tailed_oplogs = tailed_oplogs
    self.backup_oplogs = backup_oplogs
    self.compression_supported = ['none', 'gzip']
    self.resolver_summary = {}
    self.resolver_state = {}
    self.running = False
    self.stopped = False
    self.completed = False
    self._pool = None
    self._pooled = []
    self._results = {}
    try:
        self._pool = Pool(processes=self.threads(None, 2))
    except Exception as e:
        logging.fatal("Could not start oplog resolver pool! Error: %s" % e)
        raise Error(e)
def __init__(self, config, timer, db):
    self.config = config
    self.timer = timer
    self.db = db
    self.balancer_wait_secs = self.config.sharding.balancer.wait_secs
    self.balancer_sleep = self.config.sharding.balancer.ping_secs
    self.timer_name = self.__class__.__name__
    self.config_server = None
    self.config_db = None
    self.mongos_db = None
    self._balancer_state_start = None
    self.restored = False
    # Get a DB connection
    try:
        if isinstance(self.db, DB):
            self.connection = self.db.connection()
            if not self.db.is_mongos() and not self.db.is_configsvr():
                raise DBOperationError('MongoDB connection is not to a mongos or configsvr!')
        else:
            raise Error("'db' field is not an instance of class: 'DB'!")
    except Exception as e:
        logging.fatal("Could not get DB connection! Error: %s" % e)
        raise DBOperationError(e)
def set_balancer(self, value):
    try:
        if self.is_gte_34():
            # 3.4+ configsvrs don't have balancerStart/Stop, even though they're the balancer!
            # Use self.get_mongos() to get a mongos connection for now
            if value is True:
                self.get_mongos().admin_command("balancerStart")
            else:
                self.get_mongos().admin_command("balancerStop")
        else:
            if value is True:
                set_value = False
            elif value is False:
                set_value = True
            else:
                set_value = True
            config = self.connection['config']
            config['settings'].update_one({'_id': 'balancer'}, {'$set': {'stopped': set_value}})
    except Exception as e:
        logging.fatal("Failed to set balancer state! Error: %s" % e)
        raise DBOperationError(e)
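In the pre-3.4 branch above, the balancer flag is just a document in the config database. A minimal stand-alone sketch of the same write using pymongo directly (connection string and the upsert flag are illustrative assumptions, not the project's DB wrapper):

from pymongo import MongoClient

conn = MongoClient("mongodb://mongos.example.net:27017")  # assumed host, for illustration
# Stopping the balancer pre-3.4 means setting stopped=True on the settings doc:
conn["config"]["settings"].update_one({"_id": "balancer"},
                                      {"$set": {"stopped": True}},
                                      upsert=True)
print(conn["config"]["settings"].find_one({"_id": "balancer"}))
# e.g. {'_id': 'balancer', 'stopped': True}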
def stop_balancer(self):
    logging.info("Stopping the balancer and waiting a max of %i sec" % self.balancer_wait_secs)
    wait_cnt = 0
    self.timer.start(self.timer_name)
    self.set_balancer(False)
    while wait_cnt < self.balancer_wait_secs:
        if self.check_balancer_running():
            wait_cnt += self.balancer_sleep
            logging.info("Balancer is still running, sleeping for %i sec(s)" % self.balancer_sleep)
            sleep(self.balancer_sleep)
        else:
            self.timer.stop(self.timer_name)
            logging.info("Balancer stopped after %.2f seconds" % self.timer.duration(self.timer_name))
            return
    logging.fatal("Could not stop balancer %s!" % self.db.uri)
    raise DBOperationError("Could not stop balancer %s" % self.db.uri)
def get_config_server(self, force=False):
    if force or not self.config_server:
        configdb_uri = self.get_configdb_hosts()
        try:
            logging.info("Found sharding config server: %s" % configdb_uri)
            if self.db.uri.hosts() == configdb_uri.hosts():
                self.config_db = self.db
                logging.debug("Re-using seed connection to config server(s)")
            else:
                self.config_db = DB(configdb_uri, self.config, True)
            if self.config_db.is_replset():
                self.config_server = Replset(self.config, self.config_db)
            else:
                self.config_server = {'host': configdb_uri.hosts()}
                self.config_db.close()
        except Exception as e:
            logging.fatal("Unable to locate config servers using %s: %s!" % (self.db.uri, e))
            raise OperationError(e)
    return self.config_server
def readChecksums(fileIn):
    # Read checksum file, return contents as nested list
    # Also strip away any file paths if they exist (return names only)
    try:
        data = []
        f = open(fileIn, "r", encoding="utf-8")
        for row in f:
            rowSplit = row.split(' ', 1)
            # Second col contains file name. Strip away any path components if they are present
            fileName = rowSplit[1].strip()  # Raises IndexError if entry only has 1 col (malformed checksum file)!
            rowSplit[1] = os.path.basename(fileName)
            data.append(rowSplit)
        f.close()
        return data
    except IOError:
        logging.fatal("cannot read '" + fileIn + "'")
        config.errors += 1
        errorExit(config.errors, config.warnings)
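As a quick illustration of the parsing (digest value and file name are hypothetical), each row of a standard two-column checksum file is split once on the first space and any directory part of the file name is dropped:

import os

row = "d41d8cd98f00b204e9800998ecf8427e  /data/batch1/image001.tif\n"
rowSplit = row.split(' ', 1)
rowSplit[1] = os.path.basename(rowSplit[1].strip())
print(rowSplit)  # ['d41d8cd98f00b204e9800998ecf8427e', 'image001.tif']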
def _logWriter(self, level, message, exception=None):
    self._logger.setLevel(level)
    self._fh.setLevel(level)
    self._ch.setLevel(level)
    msg = message
    if exception is not None:
        exFormatted = self._formatException(exception)
        msg = "%s%s" % (message, exFormatted)
    if level == logging.DEBUG:
        logging.debug(msg)
    elif level == logging.INFO:
        logging.info(msg)
    elif level == logging.WARN:
        logging.warn(msg)
    elif level == logging.FATAL:
        logging.fatal(msg)
    elif level == logging.ERROR:
        logging.error(msg)
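The level-dispatch ladder above can be collapsed with the stdlib's generic logging.log() call; a minimal sketch under that assumption (not the original project's code):

import logging

def log_at_level(level, message, exception=None, formatter=repr):
    # logging.log() dispatches on the numeric level itself, so no
    # if/elif chain over DEBUG/INFO/WARN/ERROR/FATAL is needed.
    msg = message if exception is None else "%s%s" % (message, formatter(exception))
    logging.log(level, msg)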
def discover(conf_dir):
    version = k8s.get_kubelet_version(None)
    if version == "v1.8.0":
        logging.fatal("K8s 1.8.0 is not supported. Update K8s to "
                      "version >=1.8.1 or rollback to previous versions")
    if version >= "v1.8.1":
        # Patch the node with the appropriate CMK ER.
        logging.debug("Patching the node with the appropriate CMK ER.")
        add_node_er(conf_dir)
    else:
        # Patch the node with the appropriate CMK OIR.
        logging.debug("Patching the node with the appropriate CMK OIR.")
        add_node_oir(conf_dir)
    # Add appropriate CMK label to the node.
    logging.debug("Adding appropriate CMK label to the node.")
    add_node_label()
    # Add appropriate CMK taint to the node.
    logging.debug("Adding appropriate CMK taint to the node.")
    add_node_taint()

# add_node_oir patches the node with the appropriate CMK OIR.
def send_msg(self, msg_type, ies):
    """
    Encodes and sends the message to the IPA layer.
    """
    # Calc the maximum length possible for the message, and allocate memory
    buf_size = _GSUP.get_max_bytes(ies)
    (buf, offset) = self._ipa_writer.get_write_buf(buf_size)
    try:
        msg_len = _GSUP.encode(buf, offset, msg_type, ies)
    except GSUPCodecError as err:
        # Encoding should always succeed
        logging.fatal(
            "Encoding failed with err: %s, for msg: %s, ies: %s",
            err, msg_type, ies)
        return
    # Reset the length in the IPA header based on actual msg size
    self._ipa_writer.reset_length(buf, msg_len - offset)
    # Write the encoded msg
    self._ipa_writer.write(buf[:msg_len])
def __rebase_globals(old, new, size, iterable):
    node = internal.comment.tagging.node()
    failure, total = [], list(iterable)
    for i, (ea, count) in enumerate(total):
        # remove the old address
        ok = internal.netnode.alt.remove(node, ea)
        if not ok:
            logging.fatal("{:s}.rebase : Failure trying to remove refcount for {:x} : {!r}".format(__name__, ea, count))
        # now add the new address
        res = ea - old + new
        ok = internal.netnode.alt.set(node, res, count)
        if not ok:
            logging.fatal("{:s}.rebase : Failure trying to store refcount from {:x} to {:x} : {!r}".format(__name__, ea, res, count))
            failure.append((ea, res, count))
        yield i, ea
    return

# address naming
def pop(self):
    '''Pop a result off of the result queue.'''
    cls = self.__class__
    if not self.thread.is_alive():
        logging.fatal("{:s}.pop : Refusing to wait for a result when execution queue has already terminated. : {!r}".format('.'.join(('internal', __name__, cls.__name__)), self))
        raise Queue.Empty
    logging.debug("{:s}.pop : Popping result off of execution queue. : {!r}".format('.'.join(('internal', __name__, cls.__name__)), self))
    try:
        _, res, err = self.result.get(block=0)
        if err != (None, None, None):
            t, e, tb = err
            # Python 2 re-raise with the original traceback (Python 3 would use e.with_traceback(tb))
            raise t, e, tb
    finally:
        self.result.task_done()
    return res
def redis(self):
    """
    Lazy-loaded redis connection
    """
    from redis import StrictRedis
    import redis.exceptions
    try:
        url = os.environ.get('REDIS_CONNECTION_URL',
                             'redis://localhost:6379/0')
        conn = StrictRedis.from_url(url)
        conn.ping()
    except redis.exceptions.ConnectionError:
        logging.fatal("Redis server is not running")
        raise
    return conn
def configure_redis():
    """
    Creates a connection to the local Redis server, then returns the active
    connection.
    :return: object: the active Redis object.
    """
    try:
        url = os.getenv('REDIS_CONNECTION_URL', 'redis://localhost:6379/0')
        redis_server = redis.StrictRedis.from_url(url)
        redis_server.ping()
    except redis.exceptions.ConnectionError:
        logging.fatal("Redis server is not running! Exiting!")
        sys.exit(1)
    return redis_server
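A minimal usage sketch (host and key names are invented for illustration); the helper exits the process if the server cannot be pinged:

import os

os.environ['REDIS_CONNECTION_URL'] = 'redis://cache.internal:6380/1'  # assumed URL
r = configure_redis()
r.set('last_run', 'ok')
print(r.get('last_run'))  # b'ok'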
def parse_args(parser):
    """http://codereview.stackexchange.com/questions/79008/parse-a-config-file-
    and-add-to-command-line-arguments-using-argparse-in-python """
    args = parser.parse_args()
    if args.config_file:
        if not YAML_AVAILABLE:
            logging.fatal("Install PyYAML in order to use config files.")
            return args
        data = yaml.load(args.config_file)
        delattr(args, 'config_file')
        arg_dict = args.__dict__
        for key, value in data.items():
            if isinstance(value, list):
                for v in value:
                    arg_dict[key].append(v)
            else:
                arg_dict[key] = value
    return args
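To make the merge behaviour concrete, here is a stand-alone sketch of the same logic with invented option names (and yaml.safe_load in place of yaml.load, for the sketch only): scalar keys overwrite the argparse values, list-valued keys are appended to the existing lists.

import argparse
import yaml

parser = argparse.ArgumentParser()
parser.add_argument("--beam_size", type=int, default=4)
parser.add_argument("--predictors", action="append", default=["nmt"])
args = parser.parse_args([])

data = yaml.safe_load("beam_size: 12\npredictors: [wc]")
for key, value in data.items():
    if isinstance(value, list):
        for v in value:
            getattr(args, key).append(v)
    else:
        setattr(args, key, value)
print(args)  # Namespace(beam_size=12, predictors=['nmt', 'wc'])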
def parse_param_string(param):
    """Parses a parameter string such as 'param1=x,param2=y'. Loads
    config files if specified in the string. If ``param`` points to a
    file, load this file with YAML.
    """
    if not param:
        return {}
    if os.path.isfile(param):
        param = "config_file=%s" % param
    config = {}
    for pair in param.strip().split(","):
        (k, v) = pair.split("=", 1)
        if k == 'config_file':
            if not YAML_AVAILABLE:
                logging.fatal("Install PyYAML in order to use config files.")
            else:
                with open(v) as f:
                    data = yaml.load(f)
                    for config_file_key, config_file_value in data.items():
                        config[config_file_key] = config_file_value
        else:
            config[k] = v
    return config
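A quick sketch of the two input forms it accepts (file name and values are illustrative); note that inline values stay strings:

parse_param_string("path=model.npz,beam=4")  # -> {'path': 'model.npz', 'beam': '4'}
parse_param_string("predictor.yaml")         # -> the YAML file's contents as a dict,
                                             #    via the config_file branch above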
def set_up_predictor(self, nmt_model_path):
    """Initializes the predictor with the given NMT model. Code
    following ``blocks.machine_translation.main``.
    """
    self.src_vocab_size = self.config['src_vocab_size']
    self.trgt_vocab_size = self.config['trg_vocab_size']
    self.nmt_model = NMTModel(self.config)
    self.nmt_model.set_up()
    loader = LoadNMTUtils(nmt_model_path,
                          self.config['saveto'],
                          self.nmt_model.search_model)
    loader.load_weights()
    self.best_models = []
    self.val_bleu_curve = []
    self.src_sparse_feat_map = self.config['src_sparse_feat_map'] \
        if self.config['src_sparse_feat_map'] else FlatSparseFeatMap()
    if self.config['trg_sparse_feat_map']:
        logging.fatal("Cannot use bounded vocabulary predictor with "
                      "a target sparse feature map. Ignoring...")
    self.search_algorithm = MyopticSearch(samples=self.nmt_model.samples)
    self.search_algorithm.compile()
def load_map(self, path):
    """Load an index map file. Mappings should be bijections, but
    there is no sanity check in place to verify this.
    Args:
        path (string): Path to the mapping file
    Returns:
        list. Mapping from SGNMT index to slave predictor index
    """
    with open(path) as f:
        d = dict(map(int, line.strip().split(None, 1)) for line in f)
        if (d[utils.UNK_ID] != utils.UNK_ID
                or d[utils.EOS_ID] != utils.EOS_ID
                or d[utils.GO_ID] != utils.GO_ID):
            logging.fatal(
                "idxmap %s contains non-identical maps for reserved indices"
                % path)
        logging.debug("Loaded wmap from %s" % path)
        return [d[idx] if idx in d else 0 for idx in range(max(d) + 1)]
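For illustration, a hypothetical idxmap file holds one "SGNMT-index slave-index" pair per line, and the reserved indices must map to themselves (assuming the usual UNK=0, GO=1, EOS=2 convention):

# idxmap.txt (invented contents):
#   0 0
#   1 1
#   2 2
#   3 17
#   4 5
# load_map(..., "idxmap.txt") would return [0, 1, 2, 17, 5]; any index missing
# from the file falls back to 0 (UNK).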
def _get_sentence_indices(range_param, src_sentences):
    """Helper method for ``do_decode`` which returns the indices of the
    sentence to decode
    Args:
        range_param (string): ``--range`` parameter from config
        src_sentences (list): A list of strings. The strings are the
                              source sentences with word indices to
                              translate (e.g. '1 123 432 2')
    """
    if args.range:
        try:
            if ":" in args.range:
                from_idx, to_idx = args.range.split(":")
            else:
                from_idx = int(args.range)
                to_idx = from_idx
            return xrange(int(from_idx) - 1, int(to_idx))
        except Exception as e:
            logging.fatal("Invalid value for --range: %s" % e)
            return []
    if src_sentences is False:
        logging.fatal("Input method dummy requires --range")
        return []
    return xrange(len(src_sentences))
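--range uses 1-based, inclusive sentence numbers; a short sketch of the accepted forms (values illustrative):

# --range 3     -> decode only sentence 3          -> xrange(2, 3)
# --range 5:10  -> decode sentences 5 through 10   -> xrange(4, 10)
# (no --range)  -> decode every source sentence    -> xrange(len(src_sentences))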
def initial_states(self, batch_size, *args, **kwargs):
    """Returns the initial state depending on ``init_strategy``."""
    attended = kwargs['attended']
    if self.init_strategy == 'constant':
        initial_state = [tensor.repeat(self.parameters[2][None, :],
                                       batch_size,
                                       0)]
    elif self.init_strategy == 'last':
        initial_state = self.initial_transformer.apply(
            attended[0, :, -self.attended_dim:])
    elif self.init_strategy == 'average':
        initial_state = self.initial_transformer.apply(
            attended[:, :, -self.attended_dim:].mean(0))
    else:
        logging.fatal("dec_init parameter %s invalid" % self.init_strategy)
    return initial_state
def get_nmt_model_path(nmt_model_selector, nmt_config):
    """Get the path to the NMT model according to the given NMT config.
    This switches between the most recent checkpoint, the best BLEU
    checkpoint, or the latest parameters (params.npz). This method
    delegates to ``get_nmt_model_path_*``. This
    method relies on the global ``args`` variable.
    Args:
        nmt_model_selector (string): the ``--nmt_model_selector`` arg
                                     which defines the policy to decide
                                     which NMT model to load (params,
                                     bleu, or time)
        nmt_config (dict): NMT configuration, see ``get_nmt_config()``
    Returns:
        string. Path to the NMT model file
    """
    if nmt_model_selector == 'params':
        return get_nmt_model_path_params(nmt_config)
    elif nmt_model_selector == 'bleu':
        return get_nmt_model_path_best_bleu(nmt_config)
    elif nmt_model_selector == 'time':
        return get_nmt_model_path_most_recent(nmt_config)
    logging.fatal("NMT model selector %s not available. Please double-check "
                  "the --nmt_model_selector parameter." % nmt_model_selector)
def tf_get_nmt_predictor(args, nmt_path, nmt_config):
    """Get the TensorFlow NMT predictor.
    Args:
        args (object): SGNMT arguments from ``ArgumentParser``
        nmt_config (string): NMT configuration
        path (string): Path to NMT model or directory
    Returns:
        Predictor. An instance of ``TensorFlowNMTPredictor``
    """
    if not TENSORFLOW_AVAILABLE:
        logging.fatal("Could not find TensorFlow!")
        return None
    logging.info("Loading tensorflow nmt predictor")
    if os.path.isdir(nmt_path):
        nmt_config['train_dir'] = nmt_path
    elif os.path.isfile(nmt_path):
        nmt_config['model_path'] = nmt_path
    global session
    if not session:
        session = tf.Session()
    return TensorFlowNMTPredictor(args.cache_nmt_posteriors, nmt_config, session)
def tf_get_rnnlm_predictor(rnnlm_path, rnnlm_config, variable_prefix="model"):
    """Get the TensorFlow RNNLM predictor.
    Args:
        rnnlm_config (string): RNNLM configuration
        path (string): Path to RNNLM model or directory
        variable_prefix (string): prefix of model variables
    Returns:
        Predictor. An instance of ``TensorFlowRNNLMPredictor``
    """
    if not TENSORFLOW_AVAILABLE:
        logging.fatal("Could not find TensorFlow!")
        return None
    logging.info("Loading tensorflow rnnlm predictor")
    return TensorFlowRNNLMPredictor(rnnlm_path, rnnlm_config, variable_prefix)
def run(self, handler):
    from wsgiref.simple_server import make_server, WSGIRequestHandler
    if self.quiet:
        class QuietHandler(WSGIRequestHandler):
            def log_request(*args, **kw):
                pass
        self.options['handler_class'] = QuietHandler
    try:
        self.server = make_server(self.host, self.port, handler,
                                  **self.options)
        self.romana_http.wsgi_server_started = True
        logging.info("HTTP server: Started to listen...")
        self.server.serve_forever()
    except socket.error as e:
        logging.fatal("HTTP server: Cannot open socket "
                      "(error %d: %s)... " %
                      (e.errno, e.strerror))
# compute_metrics.py, from the vessel-classification project (GlobalFishingWatch)
def _parse(x):
    if isinstance(x, datetime.datetime):
        return x
    # 2014-08-28T13:56:16+00:00
    # TODO: fix generation to generate consistent datetimes
    if x[-6:] == '+00:00':
        x = x[:-6]
    if x.endswith('.999999'):
        x = x[:-7]
    if x.endswith('Z'):
        x = x[:-1]
    try:
        dt = datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S')
    except:
        logging.fatal('Could not parse "%s"', x)
        raise
    return dt.replace(tzinfo=pytz.UTC)
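A small usage sketch of the function above; all three suffix variants it strips normalize to the same UTC-aware datetime (timestamps invented for illustration):

for ts in ("2014-08-28T13:56:16+00:00",
           "2014-08-28T13:56:16.999999",
           "2014-08-28T13:56:16Z"):
    print(_parse(ts))  # datetime.datetime(2014, 8, 28, 13, 56, 16, tzinfo=<UTC>)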
def open(self):
    if not self._oplog:
        try:
            logging.debug("Opening oplog file %s" % self.oplog_file)
            if self.do_gzip:
                self._oplog = GzipFile(self.oplog_file, self.file_mode)
            else:
                self._oplog = open(self.oplog_file, self.file_mode)
        except Exception as e:
            logging.fatal("Error opening oplog file %s! Error: %s" % (self.oplog_file, e))
            raise OperationError(e)
    return self._oplog
def load(self):
    try:
        oplog = self.open()
        logging.debug("Reading oplog file %s" % self.oplog_file)
        for change in decode_file_iter(oplog, CodecOptions(unicode_decode_error_handler="ignore")):
            if 'ts' in change:
                self._last_ts = change['ts']
            if self._first_ts is None and self._last_ts is not None:
                self._first_ts = self._last_ts
            self._count += 1
        oplog.close()
    except Exception as e:
        logging.fatal("Error reading oplog file %s! Error: %s" % (self.oplog_file, e))
        raise OperationError(e)
def add(self, doc, autoflush=True):
    try:
        self._oplog.write(BSON.encode(doc))
        self._writes_unflushed += 1
        self._count += 1
        if not self._first_ts:
            self._first_ts = doc['ts']
        self._last_ts = doc['ts']
        if autoflush:
            self.autoflush()
    except Exception as e:
        logging.fatal("Cannot write to oplog file %s! Error: %s" % (self.oplog_file, e))
        raise OperationError(e)
def run(self):
    try:
        for mp in self.bucket.get_all_multipart_uploads():
            if mp.id == self.multipart_id:
                logging.info("Uploading file: %s (part num: %s)" % (self.file_name, self.part_num))
                with FileChunkIO(self.file_name, 'r', offset=self.offset, bytes=self.byte_count) as fp:
                    mp.upload_part_from_file(fp=fp, part_num=self.part_num)
                logging.debug("Uploaded file: %s (part num: %s)" % (self.file_name, self.part_num))
                break
    except Exception as e:
        logging.fatal("AWS S3 multipart upload failed after %i retries! Error: %s" % (self.retries, e))
        sys.exit(1)
def restore_balancer_state(self):
    if self._balancer_state_start is not None and not self.restored:
        try:
            logging.info("Restoring balancer state to: %s" % str(self._balancer_state_start))
            self.set_balancer(self._balancer_state_start)
            self.restored = True
        except Exception as e:
            logging.fatal("Failed to set balancer state! Error: %s" % e)
            raise DBOperationError(e)
def setup_signal_handlers(self):
    try:
        signal.signal(signal.SIGINT, self.cleanup_and_exit)
        signal.signal(signal.SIGTERM, self.cleanup_and_exit)
    except Exception as e:
        logging.fatal("Cannot setup signal handlers, error: %s" % e)
        sys.exit(1)
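For the registration above to work, the handler must accept the (signum, frame) pair that the signal module passes in; a minimal stand-alone sketch of a compatible handler (the body is an assumption, not the original cleanup_and_exit):

import logging
import signal
import sys

def cleanup_and_exit(signum, frame):
    # Hypothetical handler body for illustration; the original is a bound method,
    # so signal.signal() passes (signum, frame) after `self`.
    logging.info("Received signal %d, shutting down", signum)
    sys.exit(1)

signal.signal(signal.SIGINT, cleanup_and_exit)
signal.signal(signal.SIGTERM, cleanup_and_exit)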