Python logging.warning() usage examples
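
The excerpts below are collected verbatim from open-source projects. For orientation, here is first a minimal, self-contained sketch of logging.warning(); the logger setup, host name, and messages are illustrative only and not taken from any of the projects listed. Note the lazy %-style arguments, which most of the excerpts also use: the message is only rendered by the logging machinery, not eagerly at the call site.

import logging

# Route WARNING and above to stderr with a simple format.
logging.basicConfig(level=logging.WARNING,
                    format='%(asctime)s %(levelname)s: %(message)s')

def connect(host, retries=3):
    """Pretend to connect, logging a warning on each failed attempt."""
    for attempt in range(1, retries + 1):
        # Lazy %-style arguments: formatting is deferred to the logging machinery.
        logging.warning('connection to %s failed (attempt %d of %d)',
                        host, attempt, retries)
    return None

if __name__ == '__main__':
    connect('example.com')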

stratum_http.py (project: electrum-martexcoin-server, author: martexcoin)
def __init__(self, addr, requestHandler=StratumJSONRPCRequestHandler,
                 logRequests=False, encoding=None, bind_and_activate=True,
                 address_family=socket.AF_INET):
        self.logRequests = logRequests
        StratumJSONRPCDispatcher.__init__(self, encoding)
        # TCPServer.__init__ has an extra parameter on 2.6+, so
        # check Python version and decide on how to call it
        vi = sys.version_info
        self.address_family = address_family
        if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
            # Unix sockets can't be bound if they already exist in the
            # filesystem. The convention of e.g. X11 is to unlink
            # before binding again.
            if os.path.exists(addr):
                try:
                    os.unlink(addr)
                except OSError:
                    logging.warning("Could not unlink socket %s", addr)

        SocketServer.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
yellowfin.py (project: YellowFin_Pytorch, author: JianGoForIt)
def update_hyper_param(self):
    for group in self._optimizer.param_groups:
      group['momentum'] = self._mu_t
      #group['momentum'] = max(self._mu, self._mu_t)
      if not self._force_non_inc_step:
        group['lr'] = self._lr_t * self._lr_factor
        # a loose clamping to prevent catastrophically large move. If the move
        # is too large, we set lr to 0 and only use the momentum to move
        if self._adapt_clip and (group['lr'] * np.sqrt(self._global_state['grad_norm_squared']) >= self._catastrophic_move_thresh):
          group['lr'] = self._catastrophic_move_thresh / np.sqrt(self._global_state['grad_norm_squared'] + eps)
          if self._verbose:
            logging.warning("clip catastropic move!")
      elif self._iter > self._curv_win_width:
        # force to guarantee lr * grad_norm not increasing dramatically. 
        # Not necessary for basic use. Please refer to the comments
        # in YFOptimizer.__init__ for more details
        self.lr_grad_norm_avg()
        debias_factor = self.zero_debias_factor()
        group['lr'] = min(self._lr * self._lr_factor,
          2.0 * self._global_state["lr_grad_norm_avg_min"] \
          / (np.sqrt(np.exp(self._global_state['grad_norm_squared_avg_log'] / debias_factor) ) + eps) )
    return
libkas.py (project: kas, author: siemens)
def _read_stream(stream, callback):
    """
        This asynchronous method reads from the output stream of the
        application and transfers each line to the callback function.
    """
    while True:
        line = yield from stream.readline()
        try:
            line = line.decode('utf-8')
        except UnicodeDecodeError as err:
            logging.warning('Could not decode line from stream, ignore it: %s',
                            err)
        if line:
            callback(line)
        else:
            break
libkas.py (project: kas, author: siemens)
def ssh_setup_agent(config, envkeys=None):
    """
        Starts the ssh-agent
    """
    envkeys = envkeys or ['SSH_PRIVATE_KEY']
    output = os.popen('ssh-agent -s').readlines()
    for line in output:
        matches = re.search(r"(\S+)\=(\S+)\;", line)
        if matches:
            config.environ[matches.group(1)] = matches.group(2)

    for envkey in envkeys:
        key = os.environ.get(envkey)
        if key:
            ssh_add_key(config.environ, key)
        else:
            logging.warning('%s is missing', envkey)
config.py (project: kas, author: siemens)
def setup_environ(self):
        """
            Sets the environment variables for process that are
            started by kas.
        """
        distro_base = get_distro_id_base().lower()
        if distro_base in ['fedora', 'suse', 'opensuse']:
            self.environ = {'LC_ALL': 'en_US.utf8',
                            'LANG': 'en_US.utf8',
                            'LANGUAGE': 'en_US'}
        elif distro_base in ['debian', 'ubuntu']:
            self.environ = {'LC_ALL': 'en_US.UTF-8',
                            'LANG': 'en_US.UTF-8',
                            'LANGUAGE': 'en_US:en'}
        else:
            logging.warning('kas: "%s" is not a supported distro. '
                            'No default locales set.', distro_base)
            self.environ = {}
resource.py (project: core-framework, author: RedhawkSDR)
def getPort(self, name):
        """The default behavior of getPort() will automatically
        return ports as defined by 'usesport' and 'providesport'
        static class attributes."""
        self._log.trace("getPort(%s)", name)
        try:
            portdef = self.__ports[name]
        except KeyError:
            self._log.warning("getPort() could not find port %s", name)
            raise CF.PortSupplier.UnknownPort()
        else:
            portobj = portdef.__get__(self)
            if portobj is None:
                self._log.warning("component did not implement port %s",name)
                raise CF.PortSupplier.UnknownPort()
            port = portobj._this()
            if not portdef.isValid(port):
                self._log.warning("getPort() for %s did match required repid", name)
            self._log.trace("getPort() --> %s", port)
            return port
__init__.py (project: core-framework, author: RedhawkSDR)
def GetSCAFileContents(url):
    fileContents = None
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    if scheme == "sca":
        queryAsDict = dict([x.split("=") for x in query.split("&")])
        try:
            orb = CORBA.ORB_init()
            fileSys = orb.string_to_object(queryAsDict["fs"])
        except KeyError:
            logging.warning("sca URI missing fs query parameter")
        except:
            logging.warning("Unable to get ORB reference")
        else:
            if fileSys is None:
                logging.warning("Failed to lookup file system")
            else:
                try:
                    scaFile = fileSys.open(path, True)
                    fileSize = scaFile.sizeOf()
                    fileContents = scaFile.read(fileSize)
                    scaFile.close()
                finally:
                    pass
    return fileContents
remote.py (project: PyPlanet, author: PyPlanet)
def handle_payload(self, handle_nr, method=None, data=None, fault=None):
        """
        Handle a callback/response payload or fault.

        :param handle_nr: Handler ID
        :param method: Method name
        :param data: Parsed payload data.
        :param fault: Fault object.
        """
        if handle_nr in self.handlers:
            await self.handle_response(handle_nr, method, data, fault)
        elif method and data is not None:
            if method == 'ManiaPlanet.ModeScriptCallbackArray':
                await self.handle_scripted(handle_nr, method, data)
            elif method == 'ManiaPlanet.ModeScriptCallback':
                await self.handle_scripted(handle_nr, method, data)
            else:
                await self.handle_callback(handle_nr, method, data)
        elif fault is not None:
            raise TransportException('Handle payload got invalid parameters, see fault exception! {}'.format(fault)) from fault
        else:
            print(method, handle_nr, data)
            logging.warning('Received gbx data, but handle wasn\'t known or payload invalid: handle_nr: {}, method: {}'.format(
                handle_nr, method,
            ))
manager.py (project: PyPlanet, author: PyPlanet)
def finish_reservations(self):  # pragma: no cover
        """
        The method will copy all reservations to the actual signals. (PRIVATE)
        """
        for sig_name, recs in self.reserved.items():
            for func, kwargs in recs:
                try:
                    signal = self.get_signal(sig_name)
                    signal.connect(func, **kwargs)
                except Exception as e:
                    logging.warning('Signal not found: {}, {}'.format(
                        sig_name, e
                    ), exc_info=sys.exc_info())

        for sig_name, recs in self.reserved_self.items():
            for func, slf in recs:
                try:
                    signal = self.get_signal(sig_name)
                    signal.set_self(func, slf)
                except Exception as e:
                    logging.warning(str(e), exc_info=sys.exc_info())

        self.reserved = dict()
        self.reserved_self = dict()
manager.py (project: PyPlanet, author: PyPlanet)
async def load_matchsettings(self, filename):
        """
        Load Match Settings file and insert it into the current map playlist.

        :param filename: File to load, relative to Maps folder.
        :return: Boolean if loaded.
        """
        try:
            if not await self._instance.storage.driver.exists(
                os.path.join(self._instance.storage.MAP_FOLDER, filename)
            ):
                raise MapException('Can\'t find match settings file. Does it exist?')
            else:
                await self._instance.gbx('LoadMatchSettings', filename)
        except Exception as e:
            logging.warning('Can\'t load match settings!')
            raise MapException('Can\'t load matchsettings according the dedicated server, tried loading from \'{}\'!'.format(filename)) from e
views.py (project: django_pipedrive, author: MasAval)
def index(request):

    enable_hstore()

    try:
        if request.method == 'POST':

            json_data = json.loads(request.body)
            meta = json_data[u'meta']

            # API v1
            if meta[u'v'] == 1:
                return handle_v1(json_data)
            else:
                raise NonImplementedVersionException()

    except IntegrityError as e:
        logging.warning(e.message)
        logging.warning("Forcing full sync from pipedrive")
        PipedriveModel.sync_from_pipedrive()

    return HttpResponse("Hello, world!")
models.py (project: django_pipedrive, author: MasAval)
def sync_one(cls, external_id, last_error=None):
        post_data = cls.pipedrive_api_client.get_instance(external_id)

        # Error code from the API
        if not post_data[u'success']:
            logging.error(post_data)
            raise UnableToSyncException(cls, external_id)
        try:
            return cls.update_or_create_entity_from_api_post(post_data[u'data'])
        except IntegrityError as e:
            logging.warning(e)
            if e.message == last_error:
                raise SameErrorTwiceSyncException(cls, external_id, e.message)
            match = re.search('.*Key \((.*)\)=\((.*)\).*', e.message)
            if match:
                field_name = match.group(1)
                field_id = match.group(2)
                model = cls.field_model_map(field_name)
                model.sync_one(field_id)
                return cls.sync_one(external_id, e.message)
            else:
                raise Exception("Could not handle error message")
algorithm.py (project: zipline-chinese, author: zhanghan1990)
def _create_daily_stats(self, perfs):
        # create daily and cumulative stats dataframe
        daily_perfs = []
        # TODO: the loop here could overwrite expected properties
        # of daily_perf. Could potentially raise or log a
        # warning.
        for perf in perfs:
            if 'daily_perf' in perf:

                perf['daily_perf'].update(
                    perf['daily_perf'].pop('recorded_vars')
                )
                perf['daily_perf'].update(perf['cumulative_risk_metrics'])
                daily_perfs.append(perf['daily_perf'])
            else:
                self.risk_report = perf

        daily_dts = [np.datetime64(perf['period_close'], utc=True)
                     for perf in daily_perfs]
        daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)

        return daily_stats
samplesheet.py (project: cellranger, author: 10XGenomics)
def rows_are_valid_csv(rows):
    """
    Determine whether the rows comprise a readable simple CSV,
    with a lane number, sample and index (in that order)
    :type rows: list[list[string]]
    :rtype: bool
    """
    if not rows:
        return False

    if row_is_simple_header(rows[0]):
        data_idx = 1
    else:
        data_idx = 0

    pop_rows = [row for row in rows[data_idx:] if row]
    tuples = [row_is_simple_data(row) for row in pop_rows]
    for tup in tuples:
        if tup[1]:
            logging.warning(tup[1])

    return all([tup[0] for tup in tuples])
samplesheet.py (project: cellranger, author: 10XGenomics)
def rows_are_iem_samplesheet(rows):
    """
    Determine whether the rows comprise an Illumina Experiment Manager (IEM)
    sample sheet by checking for the presence of a [Data] section with
    sample header.

    :type rows: list[list[string]]
    :rtype: bool
    """
    # criteria: has to have [Data] section with recognized sample index.
    section_gen = rows_iem_section_generator(rows)
    for section in section_gen:
        if section_is_valid_data(section):
            if not iem_rows_all_have_sample_id(section.rows):
                logging.warning("Blank Sample_ID entries detected in data section")
                return False
            else:
                return True
    return False
lambda_setup.py (project: aws-greengrass-mini-fulfillment, author: awslabs)
def _create_lambda(arn, func_name, func_desc, lambda_handler, lambda_main,
                   runtime):
    func = dict()
    lamb = boto3.client('lambda')
    with open(temp_deploy_zip, 'rb') as deploy:
        func['ZipFile'] = deploy.read()
    try:
        resp = lamb.create_function(
            FunctionName=func_name, Runtime=runtime, Publish=True,
            Description=func_desc,
            Role=arn, Code=func, Handler='{0}.{1}'.format(
                lambda_main, lambda_handler
            ))
        logging.info("Create Lambda Function resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating function '{1}'.".format(
                ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
lambda_setup.py (project: aws-greengrass-mini-fulfillment, author: awslabs)
def _create_function_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')

    try:
        resp = lamb.create_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        logging.info("Create Lambda Alias resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating alias '{1}'.".format(
                ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
lambda_setup.py (project: aws-greengrass-mini-fulfillment, author: awslabs)
def _update_lambda_function(zip_file, func_name):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_function_code(
            FunctionName=func_name,
            ZipFile=zip_file.read(),
            Publish=True
        )
        return resp['Version']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning(
                "Validation Error {0} updating function '{1}'.".format(
                    ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
lambda_setup.py (project: aws-greengrass-mini-fulfillment, author: awslabs)
def _update_lambda_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        return resp['AliasArn']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning(
                "Validation Error {0} updating alias '{1}'.".format(
                    ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
sampler.py (project: bnn-analysis, author: myshkov)
def sample_posterior(self, session=None, return_stats=False, **kwargs):
        """
        Returns a new sample from the posterior distribution of the parameters.
        :param return_stats: Whether to return sampling process statistics
        :return: the generated sample
        """

        # make a number of tries to draw a sample
        for i in range(self.draw_retries_num):
            sample, stats = self._sample_posterior(session=session, return_stats=return_stats, **kwargs)
            if sample is not None:
                break

        if sample is not None:
            self.sample_number += 1
        else:
            logging.warning('Impossible to draw a sample with the specified parameters.')

        if return_stats:
            return sample, stats

        return sample
pg_wikipedia.py (project: pg-wikipedia, author: gitenberg-dev)
def get_item_summary(wd_id, lang='en'):
    if wd_id is None:
        return None
    try:
        r = requests.get(u'https://www.wikidata.org/wiki/Special:EntityData/{}.json'.format(wd_id))
    except:
        logging.warning( u"couldn't get https://www.wikidata.org/wiki/Special:EntityData/{}.json".format(wd_id))
        return ""
    try:
        title = r.json()['entities'][wd_id]['sitelinks']['{}wiki'.format(lang)]['title']
        try:
            return wikipedia.summary(title)
        except (PageError,WikipediaException,DisambiguationError):
            logging.warning(u"couldn't get wikipedia.summary({})".format(title))
            return ''
    except ValueError:
        #not JSON
        return ""
csvrotator.py (project: rpi-can-logger, author: JonnoFTW)
def _make_csv_writer(self):
        """

        :return:
        """
        self._buffer = StringIO()

        self._bytes_written = 0
        now = datetime.now()
        self._out_csv = open(self.log_folder + '/' + now.strftime('%Y%m%d_%H%M%S_{}.csv'.format(self.make_random(6))), 'w')
        logging.warning("Writing to {} ({} bytes)".format(self._out_csv.name, self.max_bytes))
        self._out_writer = csv.DictWriter(self._buffer, fieldnames=self.fieldnames, restval=None)
        self._out_writer.writeheader()
        self._out_csv.write(self._buffer.getvalue())
        self._reset_buffer()
        self.writerow({'vid': self.vin})
jsonlogrotator.py (project: rpi-can-logger, author: JonnoFTW)
def _make_writer(self):
        """

        :return:
        """
        self._buffer = StringIO()

        self._bytes_written = 0
        now = datetime.now()
        self.fname = self.log_folder + '/' + now.strftime('%Y%m%d_%H%M%S_{}.json'.format(self.make_random(6)))
        self.fname = str(pathlib.Path(self.fname))
        self._out_fh = open(self.fname, 'w')
        self.write_pid()
        logging.warning("Writing to  {} ({} bytes)".format(self._out_fh.name, self.max_bytes))

        # compress any old files still lying around
        for fname in glob(self.log_folder+"/*.json"):
            if fname != self.fname:
                self._compress(fname)
model.py (project: cxflow-tensorflow, author: Cognexa)
def _create_model(self, **kwargs) -> None:
        """
        Create your TensorFlow model.

        Every model has to define:

        - loss tensor named according to given ``loss_name``
        - input placeholders and output tensors named according to the specified input and output names

        .. warning::
            To support multi-GPU training, all the variables must be created with ``tf.get_variable``
            and appropriate variable scopes.

        :param kwargs: model configuration as specified in ``model`` section of the configuration file
        """
        raise NotImplementedError('`_create_model` method must be implemented in order to construct a new model.')
helpers.py (project: sat6_scripts, author: RedHatSatellite)
def log_msg(msg, level):
    """Write message to logfile"""

    # If we are NOT in debug mode, only write non-debug messages to the log
    if level == 'DEBUG':
        if DEBUG:
            logging.debug(msg)
            print BOLD + "DEBUG: " + msg + ENDC
    elif level == 'ERROR':
        logging.error(msg)
        tf.write('ERROR:' + msg + '\n')
        print ERROR + "ERROR: " + msg + ENDC
    elif level == 'WARNING':
        logging.warning(msg)
        tf.write('WARNING:' + msg + '\n')
        print WARNING + "WARNING: " + msg + ENDC
    # Otherwise if we ARE in debug, write everything to the log AND stdout
    else:
        logging.info(msg)
        tf.write(msg + '\n')
abstract_related.py (project: gransk, author: pcbje)
def load_all(self, config):
    """
    Load all existing data.

    :param config: Configuration object.
    :type config: ``dict``
    """
    self.buckets = {}

    for path in glob.glob(os.path.join(
            config[helper.DATA_ROOT], '%s_buckets-*.pickle' % self.NAME)):
      with open(path, 'rb') as inp:
        try:
          for key, value in pickle.load(inp).items():
            if key in self.buckets:
              self.buckets[key]['bins'].update(value['bins'])
            else:
              self.buckets[key] = value
        except:
          logging.warning('could not load related_%s data', self.NAME)
file_cache.py (project: oscars2016, author: 0x0ece)
def __init__(self, max_age):
      """Constructor.

      Args:
        max_age: Cache expiration in seconds.
      """
      self._max_age = max_age
      self._file = os.path.join(tempfile.gettempdir(), FILENAME)
      f = LockedFile(self._file, 'a+', 'r')
      try:
        f.open_and_lock()
        if f.is_locked():
          _read_or_initialize_cache(f)
        # If we can not obtain the lock, other process or thread must
        # have initialized the file.
      except Exception as e:
        logging.warning(e, exc_info=True)
      finally:
        f.unlock_and_close()
file_cache.py (project: oscars2016, author: 0x0ece)
def set(self, url, content):
    f = LockedFile(self._file, 'r+', 'r')
    try:
      f.open_and_lock()
      if f.is_locked():
        cache = _read_or_initialize_cache(f)
        cache[url] = (content, _to_timestamp(datetime.datetime.now()))
        # Remove stale cache.
        for k, (_, timestamp) in list(cache.items()):
          if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:
            del cache[k]
        f.file_handle().truncate(0)
        f.file_handle().seek(0)
        json.dump(cache, f.file_handle())
      else:
        logger.debug('Could not obtain a lock for the cache file.')
    except Exception as e:
      logger.warning(e, exc_info=True)
    finally:
      f.unlock_and_close()
__init__.py (project: oscars2016, author: 0x0ece)
def autodetect():
  """Detects an appropriate cache module and returns it.

  Returns:
    googleapiclient.discovery_cache.base.Cache, a cache object which
    is auto detected, or None if no cache object is available.
  """
  try:
    from google.appengine.api import memcache
    from . import appengine_memcache
    return appengine_memcache.cache
  except Exception:
    try:
      from . import file_cache
      return file_cache.cache
    except Exception as e:
      logging.warning(e, exc_info=True)
      return None
api.py (project: upstox-python, author: upstox)
def get_instrument_by_symbol(self, exchange, symbol):
        # get instrument given exchange and symbol
        global master_contracts_by_symbol

        exchange = exchange.lower()
        symbol = symbol.lower()
        # check if master contract exists
        if exchange not in master_contracts_by_symbol:
            logging.warning("Cannot find exchange [%s] in master contract. "
                            "Please ensure you have called get_master_contract function first" % exchange)
            return None

        master_contract = master_contracts_by_symbol[exchange]

        if symbol not in master_contract:
            logging.warning("Cannot find symbol [%s:%s] in master contract" % (exchange, symbol))
            return None

        return master_contract[symbol]

