Python warn() usage examples (source code)

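All of the snippets below are collected from open-source projects and call logging.warn(). Note that logging.warn() is an undocumented alias for logging.warning(), deprecated since Python 3.3; it still forwards to warning(), and recent interpreters emit a DeprecationWarning when it is used. A minimal sketch for comparison (the message and level setup are illustrative, not taken from any of the projects below):

import logging

logging.basicConfig(level=logging.WARNING)

# Deprecated alias, as used throughout the examples on this page.
logging.warn('disk usage at %d%%', 91)

# Preferred spelling in current Python.
logging.warning('disk usage at %d%%', 91)
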
charm_helpers_sync.py (project: charm-plumgrid-gateway, author: openstack)
def sync(src, dest, module, opts=None):

    # Sync charmhelpers/__init__.py for bootstrap code.
    sync_pyfile(_src_path(src, '__init__'), dest)

    # Sync other __init__.py files in the path leading to module.
    m = []
    steps = module.split('.')[:-1]
    while steps:
        m.append(steps.pop(0))
        init = '.'.join(m + ['__init__'])
        sync_pyfile(_src_path(src, init),
                    os.path.dirname(_dest_path(dest, init)))

    # Sync the module, or maybe a .py file.
    if os.path.isdir(_src_path(src, module)):
        sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
    elif _is_pyfile(_src_path(src, module)):
        sync_pyfile(_src_path(src, module),
                    os.path.dirname(_dest_path(dest, module)))
    else:
        logging.warn('Could not sync: %s. Neither a pyfile or directory, '
                     'does it even exist?' % module)
client.py (project: gql, author: graphql-python)
def _get_result(self, document, *args, **kwargs):
        if not self.retries:
            return self.transport.execute(document, *args, **kwargs)

        last_exception = None
        retries_count = 0
        while retries_count < self.retries:
            try:
                result = self.transport.execute(document, *args, **kwargs)
                return result
            except Exception as e:
                last_exception = e
                logging.warn("Request failed with exception %s. Retrying for the %s time...",
                             e, retries_count + 1, exc_info=True)
            finally:
                retries_count += 1

        raise RetryError(retries_count, last_exception)
ContentConsumer.py (project: PyPPSPP, author: justas-)
def thread_entry(self):
        """Entry point for consumption happening in the thread"""

        try:
            # Keep buffering for some time
            self._buffer_start = time.time()
            while self._q.qsize() < self._video_buffer_sz and not self._stop_thread:
                logging.info('Buffering. Q: %s', self._q.qsize())
                time.sleep(0.25)

            # Set the start time
            self._start_time = time.time()

            # Run until requested to stop
            while not self._stop_thread:
                self.__consume()
                time.sleep(1 / self._fps)

            # When stopped - indicate stop time
            self._stop_time = time.time()
            return

        except KeyboardInterrupt:
            logging.warn('Except in consume thread!')
            pass
haobtccny.py (project: bitcoin-arbitrage, author: ucfyao)
def _cancel_order(self, order_id):
        response = self.market.cancel(order_id)

        if not response:
            return response

        if response and "code" in response:
            logging.warn (response)
            return False

        resp_order_id = response['order_id']
        if resp_order_id == -1:
            logging.warn("cancel order #%s failed, %s" % (order_id, resp_order_id))
            return False
        else:
            logging.debug("Canceled order #%s ok" % (order_id))
            return True
huobicny.py (project: bitcoin-arbitrage, author: ucfyao)
def get_info(self):
        """Get balance"""
        try:
            response = self.market.accountInfo()
            if response and "code" in response:
                logging.warn(response)
                return False
            if response:
                self.btc_balance = float(response["available_btc_display"])
                self.cny_balance = float(response["available_cny_display"])
                self.btc_frozen = float(response["frozen_btc_display"])
                self.cny_frozen = float(response["frozen_cny_display"])
        except Exception as ex:
            logging.warn("get_info failed :%s" % ex)
            traceback.print_exc()

            return False
basicbot.py (project: bitcoin-arbitrage, author: ucfyao)
def notify_msg(self, type, price):
        import zmq
        try:
            context = zmq.Context()
            socket = context.socket(zmq.PUSH)

            socket.connect("tcp://%s:%s" % (config.ZMQ_HOST, config.ZMQ_PORT))
            time.sleep(1)

            message = {'type':type, 'price':price}
            logging.info("notify message %s", json.dumps(message))

            socket.send_string(json.dumps(message))
        except Exception as e:
            logging.warn("notify_msg Exception")
            pass
proxy.py (project: Proxy46, author: Macronut)
def handle(self):
        client_ip = self.client_address[0]

        addr = ''
        server = ''
        try:
            sock = self.connection
            sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)

            odestdata = sock.getsockopt(socket.SOL_IP, 80, 16)
            port, addr_ip = struct.unpack("!xxH4sxxxxxxxx", odestdata)
            addr = socket.inet_ntoa(addr_ip)

            server = reverse(addr)

            print_log('%s connecting %s:%d %d %s' % (client_ip, addr, port, server[0], str(server[1])))

            Proxy[server[0]].proxy(sock, server[1], (addr, port))
        except socket.error as e:
            logging.warn(addr + ':' + str(server) + ':' + str(e))
            sock.close()
db_transfer.py (project: shadowsocksR-b, author: hao35954514)
def pull_db_all_user(self):
        import cymysql
        # Connect to the user database (over SSL if enabled)
        if self.cfg["ssl_enable"] == 1:
            conn = cymysql.connect(host=self.cfg["host"], port=self.cfg["port"],
                    user=self.cfg["user"], passwd=self.cfg["password"],
                    db=self.cfg["db"], charset='utf8',
                    ssl={'ca':self.cfg["ssl_ca"],'cert':self.cfg["ssl_cert"],'key':self.cfg["ssl_key"]})
        else:
            conn = cymysql.connect(host=self.cfg["host"], port=self.cfg["port"],
                    user=self.cfg["user"], passwd=self.cfg["password"],
                    db=self.cfg["db"], charset='utf8')

        try:
            rows = self.pull_db_users(conn)
        finally:
            conn.close()

        if not rows:
            logging.warn('no user in db')
        return rows
db_transfer.py (project: shadowsocksR-b, author: hao35954514)
def pull_db_all_user(self):
        import json
        rows = None

        config_path = get_config().MUDB_FILE
        with open(config_path, 'rb+') as f:
            rows = json.loads(f.read().decode('utf8'))
            for row in rows:
                try:
                    if 'forbidden_ip' in row:
                        row['forbidden_ip'] = common.IPNetwork(row['forbidden_ip'])
                except Exception as e:
                    logging.error(e)
                try:
                    if 'forbidden_port' in row:
                        row['forbidden_port'] = common.PortRange(row['forbidden_port'])
                except Exception as e:
                    logging.error(e)

        if not rows:
            logging.warn('no user in json file')
        return rows
server_pool.py (project: shadowsocksR-b, author: hao35954514)
def update_mu_users(self, port, users):
        port = int(port)
        if port in self.tcp_servers_pool:
            try:
                self.tcp_servers_pool[port].update_users(users)
            except Exception as e:
                logging.warn(e)
            try:
                self.udp_servers_pool[port].update_users(users)
            except Exception as e:
                logging.warn(e)
        if port in self.tcp_ipv6_servers_pool:
            try:
                self.tcp_ipv6_servers_pool[port].update_users(users)
            except Exception as e:
                logging.warn(e)
            try:
                self.udp_ipv6_servers_pool[port].update_users(users)
            except Exception as e:
                logging.warn(e)
tcprelay.py (project: shadowsocksR-b, author: hao35954514)
def _socket_bind_addr(self, sock, af):
        bind_addr = ''
        if self._bind and af == socket.AF_INET:
            bind_addr = self._bind
        elif self._bindv6 and af == socket.AF_INET6:
            bind_addr = self._bindv6
        else:
            bind_addr = self._accept_address[0]

        bind_addr = bind_addr.replace("::ffff:", "")
        if bind_addr in self._ignore_bind_list:
            bind_addr = None
        if bind_addr:
            local_addrs = socket.getaddrinfo(bind_addr, 0, 0, socket.SOCK_STREAM, socket.SOL_TCP)
            if local_addrs[0][0] == af:
                logging.debug("bind %s" % (bind_addr,))
                try:
                    sock.bind((bind_addr, 0))
                except Exception as e:
                    logging.warn("bind %s fail" % (bind_addr,))
asyncdns.py (project: shadowsocksR-b, author: hao35954514)
def handle_event(self, sock, fd, event):
        if sock != self._sock:
            return
        if event & eventloop.POLL_ERR:
            logging.error('dns socket err')
            self._loop.remove(self._sock)
            self._sock.close()
            # TODO when dns server is IPv6
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                       socket.SOL_UDP)
            self._sock.setblocking(False)
            self._loop.add(self._sock, eventloop.POLL_IN, self)
        else:
            data, addr = sock.recvfrom(1024)
            if addr not in self._servers:
                logging.warn('received a packet other than our dns')
                return
            self._handle_data(data)
udprelay.py (project: shadowsocksR-b, author: hao35954514)
def _socket_bind_addr(self, sock, af):
        bind_addr = ''
        if self._bind and af == socket.AF_INET:
            bind_addr = self._bind
        elif self._bindv6 and af == socket.AF_INET6:
            bind_addr = self._bindv6

        bind_addr = bind_addr.replace("::ffff:", "")
        if bind_addr in self._ignore_bind_list:
            bind_addr = None
        if bind_addr:
            local_addrs = socket.getaddrinfo(bind_addr, 0, 0, socket.SOCK_DGRAM, socket.SOL_UDP)
            if local_addrs[0][0] == af:
                logging.debug("bind %s" % (bind_addr,))
                try:
                    sock.bind((bind_addr, 0))
                except Exception as e:
                    logging.warn("bind %s fail" % (bind_addr,))
auth_chain.py (project: shadowsocksR-b, author: hao35954514)
def insert(self, connection_id):
        if not self.enable:
            logging.warn('obfs auth: not enable')
            return False
        if not self.is_active():
            self.re_enable(connection_id)
        self.update()
        if connection_id < self.front:
            logging.warn('obfs auth: deprecated id, someone replay attack')
            return False
        if connection_id > self.front + 0x4000:
            logging.warn('obfs auth: wrong id')
            return False
        if connection_id in self.alloc:
            logging.warn('obfs auth: duplicate id, someone replay attack')
            return False
        if self.back <= connection_id:
            self.back = connection_id + 1
        self.alloc[connection_id] = 1
        while (self.front in self.alloc) or self.front + 0x1000 < self.back:
            if self.front in self.alloc:
                del self.alloc[self.front]
            self.front += 1
        self.addref()
        return True
auth.py (project: shadowsocksR-b, author: hao35954514)
def insert(self, connection_id):
        if not self.enable:
            logging.warn('obfs auth: not enable')
            return False
        if not self.is_active():
            self.re_enable(connection_id)
        self.update()
        if connection_id < self.front:
            logging.warn('obfs auth: deprecated id, someone replay attack')
            return False
        if connection_id > self.front + 0x4000:
            logging.warn('obfs auth: wrong id')
            return False
        if connection_id in self.alloc:
            logging.warn('obfs auth: duplicate id, someone replay attack')
            return False
        if self.back <= connection_id:
            self.back = connection_id + 1
        self.alloc[connection_id] = 1
        while (self.front in self.alloc) or self.front + 0x1000 < self.back:
            if self.front in self.alloc:
                del self.alloc[self.front]
            self.front += 1
        return True
auth.py (project: shadowsocksR-b, author: hao35954514)
def insert(self, client_id, connection_id):
        if self.client_id.get(client_id, None) is None or not self.client_id[client_id].enable:
            if self.client_id.first() is None or len(self.client_id) < self.max_client:
                if client_id not in self.client_id:
                    #TODO: check
                    self.client_id[client_id] = client_queue(connection_id)
                else:
                    self.client_id[client_id].re_enable(connection_id)
                return self.client_id[client_id].insert(connection_id)

            if not self.client_id[self.client_id.first()].is_active():
                del self.client_id[self.client_id.first()]
                if client_id not in self.client_id:
                    #TODO: check
                    self.client_id[client_id] = client_queue(connection_id)
                else:
                    self.client_id[client_id].re_enable(connection_id)
                return self.client_id[client_id].insert(connection_id)

            logging.warn('auth_sha1_v2: no inactive client')
            return False
        else:
            return self.client_id[client_id].insert(connection_id)
base.py (project: PyCanvas, author: PGower)
def depaginate(self, response, data_key=None):
        logging.debug('Attempting to depaginate response from {}'.format(response.url))
        all_data = []
        this_data = self.extract_data_from_response(response, data_key=data_key)
        if this_data is not None:
            if type(this_data) == list:
                all_data += this_data
            else:
                all_data.append(this_data)

        if self.has_pagination_links(response):
            pagination_links = self.extract_pagination_links(response)
            while 'next' in pagination_links:
                response = self.session.get(pagination_links['next'])
                pagination_links = self.extract_pagination_links(response)
                this_data = self.extract_data_from_response(response, data_key=data_key)
                if this_data is not None:
                    if type(this_data) == list:
                        all_data += this_data
                    else:
                        all_data.append(this_data)
        else:
            logging.warn('Response from {} has no pagination links.'.format(response.url))
        return all_data
users.py (project: klaxer, author: klaxer)
def approve(user):
    """Approve a registered user so that they can use API endpoints.

    :param user: an instance of the `KlaxerUser` object to verify
    :returns: an instance of the user that matched after updating it
    :rtype: `klaxer.users.KlaxerUser`

    """
    if user.approved:
        logging.warn('noop - User %d already approved', user.id)
        return user
    user.approved = True
    for message in user.messages:
        if message.text == config.MSG_WELCOME:
            session.delete(message)
    session.add(user)
    session.commit()
    return user
dashboard.py (project: n1mm_view, author: n1kdo)
def update_charts(q, event, size):
    try:
        os.nice(10)
    except AttributeError:
        logging.warn("can't be nice to windows")
    q.put((CRAWL_MESSAGE, 4, 'Chart engine starting...'))
    base_map = graphics.create_map()
    last_qso_timestamp = 0
    q.put((CRAWL_MESSAGE, 4, ''))

    try:
        while not event.is_set():
            t0 = time.time()
            last_qso_timestamp = load_data(size, q, base_map, last_qso_timestamp)
            t1 = time.time()
            delta = t1 - t0
            update_delay = config.DATA_DWELL_TIME - delta
            if update_delay < 0:
                update_delay = config.DATA_DWELL_TIME
            logging.debug('Next data update in %f seconds', update_delay)
            event.wait(update_delay)
    except Exception as e:
        logging.exception('Exception in update_charts', exc_info=e)
        q.put((CRAWL_MESSAGE, 4, 'Chart engine failed.', graphics.YELLOW, graphics.RED))
api.py (project: sndlatr, author: Schibum)
def post(self, id):
        job = models.RemindJob.get_by_id(int(id))
        user = auth.get_current_user()
        if job is None:
            raise HTTPNotFound()
        if job.user_id != user.user_id():
            raise HTTPForbidden()
        if job.state != 'scheduled':
            logging.warn('check_reply for non-scheduled job not possible')
            raise HTTPBadRequest()
        reply = validation.disabled_reply_schema(self.json)
        job.disabled_reply = models.DisabledReply(
            message_id=reply['messageId'],
            from_name=reply['fromName'],
            from_email=reply['fromEmail'])
        job.add_to_check_reply_queue()
        self.response_json(None)
models.py (project: sndlatr, author: Schibum)
def disable_if_replied(self, auth_token):
        """ Checks if there was a reply and disables job if there was. """
        logging.info('processing disable_if_replied for {}'.format(self.key))
        if self.state != 'checking':
            logging.warn('job not in checking state, skipping')
            return
        if not self.only_if_noreply:
            logging.warn('only_if_noreply not configured, skipping')
            self.state = 'scheduled'
            self.put()
            return
        mailman = gmail.Mailman(self.user_email, auth_token)
        try:
            reply = self.find_reply(mailman)
        finally:
            mailman.quit()
        if reply is not None:
            logging.info('reply found, disabling job')
            self.state = 'disabled'
            self.disabled_reply = DisabledReply.from_gmail_dict(reply)
            self.put()
        else:
            self.state = 'scheduled'
            self.put()
roster.py (project: xmpp-cloud-auth, author: jsxc)
def roster_cloud(self):
        '''Query roster JSON from cloud'''
        success, code, message, text = self.verbose_cloud_request({
            'operation': 'sharedroster',
            'username':  self.username,
            'domain':    self.authDomain
        })
        if success:
            if code is not None and code != requests.codes.ok:
                return code, None
            else:
                sr = None
                try:
                    sr = message['data']['sharedRoster']
                    return sr, text
                except Exception as e:
                    logging.warn('Weird response: ' + str(e))
                    return message, text
        else:
            return False, None
saslauthd_io.py (project: xmpp-cloud-auth, author: jsxc)
def read_request(cls):
        field_no = 0
        fields = [None, None, None, None]
        length_field = sys.stdin.read(2)
        while len(length_field) == 2:
            (size,) = unpack('>H', length_field)
            val = sys.stdin.read(size)
            if len(val) != size:
                logging.warn('premature EOF while reading field %d: %d != %d' % (field_no, len(val), size))
                return
            fields[field_no] = val
            field_no = (field_no + 1) % 4
            if field_no == 0:
                logging.debug('from_saslauthd got %s, %s, %s, %s' % tuple(fields))
                yield ('auth', fields[0], fields[3], fields[1])
            length_field = sys.stdin.read(2)
roster_thread.py (project: xmpp-cloud-auth, author: jsxc)
def roster_background_thread(self, sr):
        '''Entry for background roster update thread'''
        try:
            logging.debug('roster_thread for ' + str(sr))
            # Allow test hooks with static ejabberd_controller
            if hasattr(self.ctx, 'ejabberd_controller') and self.ctx.ejabberd_controller is not None:
                e = self.ctx.ejabberd_controller
            else:
                e = ejabberdctl(self.ctx)
            groups, commands = self.roster_update_users(e, sr)
            self.roster_update_groups(e, groups)
            # For some reason, the vcard changes are not pushed to the clients. Rinse and repeat.
# Maybe not necessary with synchronous thread?
#            for cmd in commands:
#                e.execute(cmd)
            self.ctx.shared_roster_db.sync()
        except AttributeError:
            pass # For tests
        except Exception as err:
            (etype, value, tb) = sys.exc_info()
            traceback.print_exception(etype, value, tb)
            logging.warn('roster_groups thread: %s:\n%s'
                         % (str(err), ''.join(traceback.format_tb(tb))))
            return False
GANcheckpoints.py (project: Neural-Photo-Editor, author: ajbrock)
def load_weights(fname, params):
    # params = lasagne.layers.get_all_params(l_out,trainable=True)+[log_sigma]+[x for x in lasagne.layers.get_all_params(l_out) if x.name[-4:]=='mean' or x.name[-7:]=='inv_std']
    names = [ par.name for par in params ]
    if len(names)!=len(set(names)):
        raise ValueError('need unique param names')

    param_dict = np.load(fname)
    for param in params:
        if param.name in param_dict:
            stored_shape = np.asarray(param_dict[param.name].shape)
            param_shape = np.asarray(param.get_value().shape)
            if not np.all(stored_shape == param_shape):
                warn_msg = 'shape mismatch:'
                warn_msg += '{} stored:{} new:{}'.format(param.name, stored_shape, param_shape)
                warn_msg += ', skipping'
                warnings.warn(warn_msg)
            else:
                param.set_value(param_dict[param.name])
        else:
            logging.warn('unable to load parameter {} from {}'.format(param.name, fname))
    if 'metadata' in param_dict:
        metadata = pickle.loads(str(param_dict['metadata']))
    else:
        metadata = {}
    return metadata
div_all_conditions_sol.py (project: python, author: micronicstraining)
def main(args):
    num, den = args
    try:
        num, den = float(num), float(den)
    except ValueError as e:
        logging.error('Invalid input')
        return INVALID_INPUT

    if den == 0:
        # this is a run-time error but not a type error
        # can be considered a warning or an error based on use case
        # written here as mere warning.
        logging.warn('Invalid denominator input!')
        return DIV_BY_ZERO_EXIT

    if math.isnan(num) or math.isnan(den):
        return INVALID_INPUT_NAN

    if math.isinf(num) or math.isinf(den):
        return INVALID_INPUT_INF

    print('Answer: ' + str(num / den))
    return 0
cache_clt.py (project: annotated-py-asyncio, author: hhstore)
def testing(label, cache, loop):

    def w(g):
        return asyncio.wait_for(g, args.timeout, loop=loop)

    key = 'foo-%s' % label
    while True:
        logging.info('%s %s', label, '-'*20)
        try:
            ret = yield from w(cache.set(key, 'hello-%s-world' % label))
            logging.info('%s set %s', label, ret)
            ret = yield from w(cache.get(key))
            logging.info('%s get %s', label, ret)
            ret = yield from w(cache.delete(key))
            logging.info('%s del %s', label, ret)
            ret = yield from w(cache.get(key))
            logging.info('%s get2 %s', label, ret)
        except asyncio.TimeoutError:
            logging.warn('%s Timeout', label)
        except Exception as exc:
            logging.exception('%s Client exception: %r', label, exc)
            break
LogWrapper.py (project: tagberry, author: csailer)
def _logWriter(self,level,message,exception=None):

        self._logger.setLevel(level)
        self._fh.setLevel(level)
        self._ch.setLevel(level)
        exFormatted = ''
        if exception is not None:
            exFormatted = self._formatException(exception)

        msg = "%s%s" % (message, exFormatted)

        if(level==logging.DEBUG):
           logging.debug(msg) 
        elif(level==logging.INFO):
           logging.info(msg) 
        elif(level==logging.WARN):
           logging.warn(msg) 
        elif(level==logging.FATAL):
           logging.fatal(msg) 
        elif(level==logging.ERROR):
           logging.error(msg)
geco_gwpy_dump.py (project: geco_data, author: stefco)
def read(self, **kwargs):
        """Read this timeseries from file using GWpy. If the file is not
        present, an IOError is raised, UNLESS an unsuccessful attempt has been
        made to download the file, in which case it raises an
        NDS2Exception (a custom error type)."""
        try:
            return gwpy.timeseries.TimeSeries.read(self.fname, **kwargs)
        except IOError as e:
            if not self.query_failed():
                msg = ('tried concatenating data, but a download attempt seems '
                       'not to have been made for this query: {} See IOError '
                       'message: {}').format(self, e)
                logging.error(msg)
                raise IOError(('Aborting concatenation. Neither an error log '
                               'file nor a saved timeseries file were found '
                               'for this query: {}').format(self))
            else:
                logging.warn(('While reading, encountered failed query: '
                              '{}. Padding...').format(self))
                raise NDS2Exception(('This query seems to have failed '
                                     'downloading: {}').format(self))
geco_gwpy_dump.py (project: geco_data, author: stefco)
def _get_missing_m_trend(self, pad='DEFAULT_PAD', **kwargs):
        """Get a single second of missing data."""
        logging.debug('Fetching missing m-trend: {}'.format(self))
        missing_buf = self.fetch() # explicitly fetch from NDS2
        trend = self.channel.split('.')[1].split(',')[0]
        # make m-trend value for this minute based on trend extension
        if len(np.nonzero(missing_buf == -1)[0]) != 0:
            # this won't actually check for anything at the moment because
            # gwpy.timeseries.TimeSeries.fetch() does not have a padding option
            # yet
            logging.warn('Still missing data in {}'.format(self))
        elif trend == 'mean':
            buf_trend = missing_buf.mean()
        elif trend == 'min':
            buf_trend = missing_buf.min()
        elif trend == 'max':
            buf_trend = missing_buf.max()
        elif trend == 'rms':
            buf_trend = missing_buf.rms(60)[0]
        elif trend == 'n':
            buf_trend = missing_buf.sum()
        else:
            raise ValueError('Unrecognized trend type: {}'.format(trend))
        return buf_trend

