Example source code for Python's reduce()
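
functools.reduce(function, iterable[, initializer]) folds an iterable into a single value by applying a two-argument function cumulatively from left to right. In Python 3 it must be imported from functools (in Python 2 it was also a builtin), which is why many of the excerpts below call a bare reduce. A minimal sketch before the project examples:

from functools import reduce

# reduce applies the function left to right:
# ((((1 + 2) + 3) + 4) + 5) == 15
total = reduce(lambda acc, x: acc + x, [1, 2, 3, 4, 5])
print(total)  # 15

# An optional third argument seeds the accumulator (and covers empty input).
total_from_ten = reduce(lambda acc, x: acc + x, [1, 2, 3, 4, 5], 10)
print(total_from_ten)  # 25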

in_db.py — project: bibcure, author: bibcure
from functools import reduce
from itertools import groupby
import operator


def update_bibs_in(grouped_bibs, db_abbrev):
    # update_in and manual_update_in are helpers defined elsewhere in bibcure.
    actions = {
        "y": lambda items: [update_in(bibs, db_abbrev) for bibs in items],
        "m": lambda items: [manual_update_in(bibs, db_abbrev) for bibs in items],
        "n": lambda items: items
    }
    print("\n ")
    action = input("Abbreviate everything? " +
                   "y(yes, automatic)/m(manual)/n(do nothing)")
    grouped_bibs.sort(key=operator.itemgetter('journal'))
    grouped_by_journal = []
    for key, items in groupby(grouped_bibs, lambda i: i["journal"]):
        grouped_by_journal.append(list(items))

    if action in ("y", "m", "n"):
        updated_bibs = actions.get(action)(grouped_by_journal)
    else:
        # Unrecognised answer: ask again.
        return update_bibs_in(grouped_bibs, db_abbrev)

    # Flatten the per-journal lists back into a single flat list.
    updated_bibs = reduce(lambda a, b: a + b, updated_bibs)
    return updated_bibs
format_nmea.py — project: fdxread, author: lkarsten
# Relies on module-level: from functools import reduce; from operator import xor
def checksum(self, samples):
    if samples is None:
        return None

    completed = []
    for sentence in samples:
        assert sentence[0] == "$"
        # NMEA checksum: XOR of every character after the leading "$".
        cksum = reduce(xor, (ord(s) for s in sentence[1:]))
        completed.append("%s*%02X" % (sentence, cksum))

    if len(completed) == 0:
        return None

    # NMEA 0183 uses \r\n as the line separator, even on Unix systems.
    s = ""
    for line in completed:
        s = s + line + "\r\n"
    return s
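
To exercise the XOR fold above standalone, a minimal sketch (the sentence value is only an illustration):

from functools import reduce
from operator import xor

sentence = "$GPGLL,4916.45,N,12311.12,W"
cksum = reduce(xor, (ord(c) for c in sentence[1:]))  # XOR of every char after "$"
print("%s*%02X" % (sentence, cksum))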
io.py — project: wurst, author: IndEcol
import functools
import operator

import numpy as np


def get_comma_separated_data(raw):
    # Convert the list of lines to one long string
    header, data = "".join(raw).strip().split(" = ")

    # Remove the trailing semicolon
    assert data[-1] == ';'
    data = data[:-1]

    # Remove newline characters and evaluate the literal into a list
    data = eval(data.replace("\n", ''))

    # The header carries the array shape between "[" and "]".
    shape = tuple(eval(header[header.index("["):header.index("]") + 1]))
    step_size = functools.reduce(operator.mul, shape) + 1
    years = np.array(data[::step_size], dtype=int)

    data = np.stack([
        np.array(data[1 + index * step_size:(index + 1) * step_size]).reshape(shape)
        for index in range(len(years))
    ], axis=-1)

    return header, years, data
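
The step_size computation above uses the common reduce/operator.mul pattern for the product of a shape tuple; a minimal sketch:

import functools
import operator

shape = (3, 4, 5)  # hypothetical array shape
n_elements = functools.reduce(operator.mul, shape)
print(n_elements)  # 60; on Python 3.8+, math.prod(shape) is equivalent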
views.py — project: django-codenerix-products, author: centrologic
# Relies on module-level: from functools import reduce; import operator;
# from django.db.models import Q; from django.conf import settings
def get_foreign(self, queryset, search, filters):
    # Filter with the search string
    query = [Q(code__icontains=search), ]
    for lang in settings.LANGUAGES_DATABASES:
        query.append(Q(**{"{}__name__icontains".format(lang.lower()): search}))

    # OR all the Q objects together into a single filter
    qs = queryset.filter(
        reduce(operator.or_, query)
    )
    category = filters.get('ProductForm_category', None)
    if category is None:
        category = filters.get('ProductFormCreate_category', None)
    if category is None:
        category = filters.get('ProductFormCreateCustom_category', None)

    if category:
        qs = qs.filter(category__pk=category)

    return qs[:settings.LIMIT_FOREIGNKEY]
models.py — project: django-codenerix-products, author: centrologic
# Relies on module-level: from functools import reduce; from operator import or_;
# from django.db.models import F; Iterable from the collections module
def query_or(cls, query, *values_list, **annotations):
    pop_annotations = False
    if 'pop_annotations' in annotations:
        pop_annotations = annotations['pop_annotations']
        annotations.pop('pop_annotations')

    annotated_keys = annotations.values()
    annotations = {key: F(value) for key, value in annotations.items()}

    # An iterable of Q objects is OR-ed together into a single query.
    if isinstance(query, Iterable):
        query = reduce(or_, query)

    result = cls.objects.filter(query).values(*values_list).annotate(**annotations)

    if pop_annotations:
        for querydict in result:
            for value in annotated_keys:
                querydict.pop(value)

    return result


# tax types applicable to products
sqlite_plugin_helper.py — project: PlasoScaffolder, author: ClaudiaSaxer
def GetDistinctColumnsFromSQLQueryData(
      self,
      queries: [sql_query_model.SQLQueryModel]) -> [str]:
    """Get a distinct list of all attributes from multiple queries.

    Args:
      queries ([sql_query_model.SQLQueryModel]): an array of multiple
          SQL query data objects

    Returns:
      list[str]: all distinct attributes used in the query
    """
    if len(queries) != 0:
      list_of_list_of_column_model = [query.columns for query in queries]
      list_of_column_model = functools.reduce(lambda x, y: x + y,
                                              list_of_list_of_column_model)
      list_of_columns_snake_case = [column.GetColumnAsSnakeCase() for column in
                                    list_of_column_model]
      distinct_columns = sorted(set().union(list_of_columns_snake_case))
      return distinct_columns
    else:
      return []
difflib.py — project: kinect-2-libras, author: inessadl
def ratio(self):
    """Return a measure of the sequences' similarity (float in [0,1]).

    Where T is the total number of elements in both sequences, and
    M is the number of matches, this is 2.0*M / T.
    Note that this is 1 if the sequences are identical, and 0 if
    they have nothing in common.

    .ratio() is expensive to compute if you haven't already computed
    .get_matching_blocks() or .get_opcodes(), in which case you may
    want to try .quick_ratio() or .real_quick_ratio() first to get an
    upper bound.

    >>> s = SequenceMatcher(None, "abcd", "bcde")
    >>> s.ratio()
    0.75
    >>> s.quick_ratio()
    0.75
    >>> s.real_quick_ratio()
    1.0
    """

    # Sum the size of every matching block, then normalise by total length.
    matches = reduce(lambda sum, triple: sum + triple[-1],
                     self.get_matching_blocks(), 0)
    return _calculate_ratio(matches, len(self.a) + len(self.b))
bist.py — project: litesdcard, author: lambdaconcept
# Migen LFSR (linear-feedback shift register).
# Relies on module-level: from functools import reduce; from operator import xor
def __init__(self, n_out, n_state=31, taps=[27, 30]):
    self.o = Signal(n_out)

    # # #

    state = Signal(n_state)
    curval = [state[i] for i in range(n_state)]
    curval += [0]*(n_out - n_state)
    for i in range(n_out):
        # Feedback bit: inverted XOR of the tap positions.
        nv = ~reduce(xor, [curval[tap] for tap in taps])
        curval.insert(0, nv)
        curval.pop()

    self.sync += [
        state.eq(Cat(*curval[:n_state])),
        self.o.eq(Cat(*curval))
    ]
encoder.py — project: segno, author: heuer
def calc_structured_append_parity(content):
    """\
    Calculates the parity data for the Structured Append mode.

    :param str content: The content.
    :rtype: int
    """
    if not isinstance(content, str_type):
        content = str(content)
    try:
        data = content.encode('iso-8859-1')
    except UnicodeError:
        try:
            data = content.encode('shift-jis')
        except (LookupError, UnicodeError):
            data = content.encode('utf-8')
    if _PY2:
        data = (ord(c) for c in data)
    # The parity byte is the XOR of all bytes of the encoded content.
    return reduce(xor, data)
merger.py — project: otRebuilder, author: Pal3love
def _Lookup_PairPosFormat1_subtables_flatten(lst, font):
    assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools."

    self = ot.PairPos()
    self.Format = 1
    self.Coverage = ot.Coverage()
    self.Coverage.Format = 1
    self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
    self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)

    # Align them
    glyphs, padded = _merge_GlyphOrders(font,
                        [v.Coverage.glyphs for v in lst],
                        [v.PairSet for v in lst])

    self.Coverage.glyphs = glyphs
    self.PairSet = [_PairSet_flatten([v for v in values if v is not None], font)
                for values in zip(*padded)]
    self.PairSetCount = len(self.PairSet)
    return self
train.py — project: DeepLearning_PlantDiseases, author: MarkoArsenovic
def train_stats(m, trainloader, param_list=None):
    stats = {}
    params = filtered_params(m, param_list)
    # Iterate over the running totals of per-tensor element counts; after the
    # loop, counts[0] is the index of the last tensor and counts[1] the total
    # number of optimized parameters.
    counts = 0, 0
    for counts in enumerate(accumulate(reduce(lambda d1, d2: d1 * d2, p[1].size())
                                       for p in params)):
        pass
    stats['variables_optimized'] = counts[0] + 1
    stats['params_optimized'] = counts[1]

    before = time.time()
    losses = train(m, trainloader, param_list=param_list)
    stats['training_time'] = time.time() - before

    stats['training_loss'] = losses[-1] if len(losses) else float('nan')
    stats['training_losses'] = losses

    return stats
forms.py — project: mendelmd, author: raonyguimaraes
def filter_queryset(self, term, queryset=None):
    """
    Return QuerySet filtered by search_fields matching the passed term.

    Args:
        term (str): Search term
        queryset (QuerySet): Optional QuerySet to filter; defaults to
            self.get_queryset()

    Returns:
        QuerySet: Filtered QuerySet

    """
    if queryset is None:
        queryset = self.get_queryset()
    search_fields = self.get_search_fields()
    select = Q()
    term = term.replace('\t', ' ')
    term = term.replace('\n', ' ')
    # AND across whitespace-separated tokens, OR across search fields.
    for t in [t for t in term.split(' ') if not t == '']:
        select &= reduce(lambda x, y: x | Q(**{y: t}), search_fields,
                         Q(**{search_fields[0]: t}))
    return queryset.filter(select).distinct()
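
The AND-of-ORs above is built from Django Q objects; the same fold works with plain predicates. A minimal sketch with hypothetical data and no Django dependency:

from functools import reduce

search_fields = ['name', 'email', 'city']  # hypothetical field names

def field_contains(field, term):
    return lambda row: term.lower() in str(row.get(field, '')).lower()

def either(p, q):
    return lambda row: p(row) or q(row)

term = 'ann'
# OR across fields, exactly like the reduce over Q objects above.
match_any_field = reduce(either, (field_contains(f, term) for f in search_fields))

rows = [{'name': 'Anna'}, {'email': 'bob@example.com'}, {'city': 'Annecy'}]
print([r for r in rows if match_any_field(r)])  # the 'Anna' and 'Annecy' rows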
test_vad.py — project: vad, author: bond005
# Relies on module-level: import math, struct, vad; from functools import reduce
def test_calculate_SNR_positive_1(self):
    source_array = [89, -89] * 6000 + [502, -502] * 8000 + [89, -89] * 7000
    # Pack the samples into big-endian 16-bit PCM bytes.
    source_data = reduce(
        lambda a, b: a + struct.pack('>h', b), source_array[1:], struct.pack('>h', source_array[0])
    )
    sampling_frequency = 8000
    bounds_of_speech = [(2.0 * 6000.0 / sampling_frequency, 2.0 * (6000.0 + 8000.0) / sampling_frequency)]
    # Mean squared amplitude over the silent regions...
    silence_energy = reduce(
        lambda a, b: a + b * b,
        source_array[0:(2 * 6000)] + source_array[(2 * (6000 + 8000)):],
        vad.EPS
    ) / (2.0 * (6000.0 + 7000.0))
    # ...and over the speech region.
    speech_energy = reduce(
        lambda a, b: a + b * b,
        source_array[(2 * 6000):(2 * (6000 + 8000))],
        vad.EPS
    ) / (2.0 * 8000.0)
    target_snr = 20.0 * math.log10(speech_energy / silence_energy)
    self.assertAlmostEqual(target_snr, vad.calculate_SNR(source_data, sampling_frequency, bounds_of_speech))
tabulate.py — project: synergy-service, author: openstack
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True

    """
    types = [_type(s, has_invisible) for s in strings]
    return reduce(_more_generic, types, int)
io.py — project: wurst, author: IndEcol
# Companion to get_comma_separated_data above; same module-level imports
# (functools, operator, numpy as np).
def get_space_separated_data(raw):
    assert raw[0].strip().endswith("= [")
    assert raw[-1].strip().endswith("];")

    header = raw[0].replace("= [", "").strip()
    shape = tuple(eval(header[header.index("["):header.index("]") + 1]))
    # Turn each space-separated row into a Python literal and evaluate it.
    data = [eval(line.strip().replace("  ", ",")) for line in raw[1:-1]]

    if len(shape) == 1:
        step_size = 1
    else:
        step_size = functools.reduce(operator.mul, shape[:-1])

    years = np.array(data[::step_size + 1], dtype=int)

    subarrays = [
        np.array(data[index * (step_size + 1) + 1:(index + 1) * (step_size + 1)]).reshape(shape)
        for index in range(len(years))
    ]
    return header, years, np.stack(subarrays, axis=-1)
base.py — project: sauna, author: NicolasLM
def get_current_status(cls):
    """Get the worst status of all check results.

    :returns: (status as str, code)
    :rtype: tuple
    """
    from sauna.plugins.base import Plugin
    from sauna import check_results_lock, check_results

    def reduce_status(accumulated, update_value):
        # Ignore statuses above CRIT; otherwise keep the highest code seen.
        if update_value.status > Plugin.STATUS_CRIT:
            return accumulated
        return accumulated if accumulated > update_value.status else \
            update_value.status

    with check_results_lock:
        code = reduce(reduce_status, check_results.values(), 0)

    return Plugin.status_code_to_str(code), code
scheduler.py — project: sauna, author: NicolasLM
def __init__(self, jobs):
    """
    Create a new Scheduler.

    >>> s = Scheduler([Job(1, max, 100, 200)])
    >>> for jobs in s:
    ...    time.sleep(s.tick_duration)

    :param jobs: Sequence of jobs to schedule
    """
    periodicities = {job.periodicity for job in jobs}
    # One tick is the GCD of all job periodicities. Note that fractions.gcd
    # was removed in Python 3.9; math.gcd is the modern equivalent.
    self.tick_duration = reduce(lambda x, y: fractions.gcd(x, y),
                                periodicities)
    self._ticks = self.find_minimum_ticks_required(self.tick_duration,
                                                   periodicities)
    self._jobs = jobs
    self._current_tick = 0
    logger.debug('Scheduler has {} ticks, each one is {} seconds'.
                 format(self._ticks, self.tick_duration))
hwmon.py — project: sauna, author: NicolasLM
def temperature(self, check_config):
    dummy_sensor = Sensor(device_name='Dummy', label='Dummy', value=-1000)
    sensors = self._get_temperatures()
    if check_config.get('sensors'):
        sensors = [
            sensor for sensor in sensors
            if sensor.device_name in check_config.get('sensors', [])
        ]
    # Keep the hottest sensor; the dummy only survives if the list is empty.
    sensor = reduce(lambda x, y: x if x.value > y.value else y,
                    sensors,
                    dummy_sensor)
    if sensor is dummy_sensor:
        return self.STATUS_UNKNOWN, 'No sensor found'
    status = self._value_to_status_less(sensor.value, check_config)
    if status > self.STATUS_OK:
        return (
            status,
            'Sensor {}/{} {}°C'.format(sensor.device_name,
                                       sensor.label,
                                       sensor.value)
        )
    return self.STATUS_OK, 'Temperature okay ({}°C)'.format(sensor.value)
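
The reduce above is a hand-rolled "maximum by value"; the max builtin with a key argument does the same job. A sketch with a simplified stand-in for Sensor:

from collections import namedtuple
from functools import reduce

Sensor = namedtuple('Sensor', 'device_name label value')  # simplified stand-in
sensors = [Sensor('cpu', 'core0', 52), Sensor('cpu', 'core1', 57)]

hottest = reduce(lambda x, y: x if x.value > y.value else y, sensors)
assert hottest == max(sensors, key=lambda s: s.value)  # equivalent here
print(hottest)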
layers.py — project: photo-editing-tensorflow, author: JamesChuanggg
def linear(input_,
           output_size,
           weights_initializer=initializers.xavier_initializer(),
           biases_initializer=tf.zeros_initializer,
           activation_fn=None,
           trainable=True,
           name='linear'):
  shape = input_.get_shape().as_list()

  # Flatten everything but the batch dimension.
  if len(shape) > 2:
    input_ = tf.reshape(input_, [-1, reduce(lambda x, y: x * y, shape[1:])])
    shape = input_.get_shape().as_list()

  with tf.variable_scope(name):
    w = tf.get_variable('w', [shape[1], output_size], tf.float32,
        initializer=weights_initializer, trainable=trainable)
    b = tf.get_variable('b', [output_size],
        initializer=biases_initializer, trainable=trainable)
    out = tf.nn.bias_add(tf.matmul(input_, w), b)

    if activation_fn is not None:
      return activation_fn(out), w, b
    else:
      return out, w, b
mparray_test.py — project: mpnum, author: dseuss
def test_sumup(nr_sites, local_dim, rank, rgen, dtype):
    mpas = [factory.random_mpa(nr_sites, local_dim, 3, dtype=dtype, randstate=rgen)
            for _ in range(rank if rank is not np.nan else 1)]
    sum_naive = ft.reduce(mp.MPArray.__add__, mpas)
    sum_mp = mp.sumup(mpas)

    assert_array_almost_equal(sum_naive.to_array(), sum_mp.to_array())
    assert all(r <= 3 * rank for r in sum_mp.ranks)
    assert(sum_mp.dtype is dtype)

    weights = rgen.randn(len(mpas))
    summands = [w * mpa for w, mpa in zip(weights, mpas)]
    sum_naive = ft.reduce(mp.MPArray.__add__, summands)
    sum_mp = mp.sumup(mpas, weights=weights)
    assert_array_almost_equal(sum_naive.to_array(), sum_mp.to_array())
    assert all(r <= 3 * rank for r in sum_mp.ranks)
    assert(sum_mp.dtype is dtype)
test_pipeline.py — project: sanergy-public, author: dssg
def test_staff(self):
    dfw2 = self.dfw.copy()
    dfw2 = 0

    staffing = Staffing(self.dfs, self.dfw, dfw2, self.dtr, self.staffing_parameters, self.config)
    roster, s, vars = staffing.staff()

    # Sum the solver variables for each day over all N candidate workers.
    collectors_day0 = reduce(lambda x, y: x + y, [s.getVal(vars[i, 'DSSG', datetime(2011, 11, 11)]) for i in range(0, self.staffing_parameters['N'])])
    collectors_day1 = reduce(lambda x, y: x + y, [s.getVal(vars[i, 'DSSG', datetime(2011, 11, 12)]) for i in range(0, self.staffing_parameters['N'])])
    collectors_day2 = reduce(lambda x, y: x + y, [s.getVal(vars[i, 'DSSG', datetime(2011, 11, 13)]) for i in range(0, self.staffing_parameters['N'])])
    collectors_day5 = reduce(lambda x, y: x + y, [s.getVal(vars[i, 'DSSG', datetime(2011, 11, 16)]) for i in range(0, self.staffing_parameters['N'])])
    # Need 2 people on Monday, 3 people on Tuesday, and 1 (-> 2) people on Wednesday. Zero on other days.
    self.assertEqual(collectors_day0, 2)
    self.assertEqual(collectors_day1, 3)
    self.assertEqual(collectors_day2, 2)
    self.assertEqual(collectors_day5, 0)
    self.assertEqual(roster.shape[0], 1)
    self.assertEqual(list(roster.loc['DSSG', [datetime(2011, 11, 11), datetime(2011, 11, 12), datetime(2011, 11, 13)]].values), [collectors_day0, collectors_day1, collectors_day2])
fa_base.py — project: pytorch.rl.learning, author: moskomule
def __init__(self, env_name, num_episodes, alpha, gamma, epsilon, policy, **kwargs):
    """
    Base class for RL using function approximation.
    :param env_name: name of the environment; currently only environments whose
     observation space is Box and whose action space is Discrete are supported,
     see https://github.com/openai/gym/wiki/Table-of-environments
    :param num_episodes: number of episodes for training
    :param alpha:
    :param gamma:
    :param epsilon:
    :param kwargs: other arguments.
    """
    super(FABase, self).__init__(env_name, num_episodes, alpha, gamma, policy, epsilon=epsilon, **kwargs)

    if not isinstance(self.env.action_space, gym.spaces.Discrete) or \
            not isinstance(self.env.observation_space, gym.spaces.Box):
        raise NotImplementedError("action_space should be discrete and "
                                  "observation_space should be box")

    self.obs_shape = self.env.observation_space.shape
    # Number of observation features = product of the observation shape.
    self.obs_size = reduce(lambda x, y: x * y, self.obs_shape)
    self.action_size = self.env.action_space.n
    self._feature = torch.Tensor(self.action_size, self.obs_size)
    self._weight = None
pg_base.py — project: pytorch.rl.learning, author: moskomule
def __init__(self, env_name, num_episodes, alpha, gamma, policy, **kwargs):
    """
    Base class for RL using policy gradient.
    :param env_name: name of the environment; currently only environments whose
     observation space is Box and whose action space is Discrete are supported,
     see https://github.com/openai/gym/wiki/Table-of-environments
    :param num_episodes:
    :param alpha:
    :param gamma:
    :param policy:
    :param kwargs:
    """
    super(PGBase, self).__init__(env_name, num_episodes, alpha, gamma, policy, **kwargs)
    if not isinstance(self.env.action_space, gym.spaces.Discrete) or \
            not isinstance(self.env.observation_space, gym.spaces.Box):
        raise NotImplementedError("action_space should be discrete and "
                                  "observation_space should be box")
    self.obs_shape = self.env.observation_space.shape
    self.obs_size = reduce(lambda x, y: x * y, self.obs_shape)
    self.action_size = self.env.action_space.n
    self._feature = None
    self._weight = None
modelstorage.py — project: health-mosconi, author: GNUHealth-Mosconi
def search_read(cls, domain, offset=0, limit=None, order=None,
        fields_names=None):
    '''
    Call search and read functions at once.
    Useful for the client to reduce the number of calls.
    '''
    records = cls.search(domain, offset=offset, limit=limit, order=order)

    if not fields_names:
        fields_names = cls._fields.keys()
    if 'id' not in fields_names:
        fields_names.append('id')
    rows = cls.read(map(int, records), fields_names)
    index = {r.id: i for i, r in enumerate(records)}
    rows.sort(key=lambda r: index[r['id']])
    return rows
tensor.py — project: pytorch-dist, author: apaszke
def view(self, *args):
    dst = self.new()
    if len(args) == 1 and isinstance(args[0], torch.Size):
        sizes = args[0]
    else:
        sizes = torch.Size(args)
    sizes = _infer_sizes(sizes, self.nelement())
    # Total element count implied by the requested sizes.
    numel = reduce(lambda a, b: a * b, sizes) if len(sizes) > 0 else 0

    if numel != self.nelement():
        def format_size(size):
            return 'x'.join(str(v) for v in size) if len(size) > 0 else '0'
        raise ValueError(
            "view of size '{0}' is invalid for input of size '{1}'"
            .format(format_size(sizes), format_size(self.size())))
    if not self.is_contiguous():
        raise ValueError("input should be contiguous")
    if self.storage() is not None:
        dst.set_(self.storage(), self.storage_offset(), sizes)
    return dst
difflib.py — project: hostapd-mana, author: adde88 (the ratio() excerpt is identical to the difflib.py listing above and is not repeated)
train.py — project: deepcut, author: rkcosmos
def generate_words(files):
    """
    Transform a list of files into a list of words,
    removing newline characters
    and stripping the name-entity '<NE>...</NE>' and abbreviation
    '<AB>...</AB>' markers.
    """

    repls = {'<NE>': '', '</NE>': '', '<AB>': '', '</AB>': ''}

    words_all = []
    for i, file in enumerate(files):
        lines = open(file, 'r')
        for line in lines:
            # Apply every replacement pair in repls to the line in turn.
            line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line)
            words = [word for word in line.split("|") if word != '\n']
            words_all.extend(words)
    return words_all
dispycos.py — project: pycos, author: pgiri
def status(self):
    pending = sum(node.cpus_used for node in self._nodes.values())
    # Flatten the per-node server lists into a single list.
    servers = functools.reduce(operator.add, [list(node.servers.keys())
                                              for node in self._nodes.values()], [])
    return {'Client': self._cur_computation._pulse_task.location if self._cur_computation else '',
            'Pending': pending, 'Nodes': list(self._nodes.keys()), 'Servers': servers
            }
env_converter.py — project: drl.pth, author: seba-1511
def numel(x):
    # Element count of x, whichever container type it is.
    if hasattr(x, 'shape'):
        return reduce(lambda x, y: x * y, x.shape)
    if hasattr(x, 'size'):
        return reduce(lambda x, y: x * y, x.size)
    if isinstance(x, Iterable):
        return reduce(lambda x, y: x * y, x)
    return x.n
__init__.py — project: python-, author: secondtonone1
def resolve(self):
    """
    Resolve the entry point from its module and attrs.
    """
    module = __import__(self.module_name, fromlist=['__name__'], level=0)
    try:
        # Walk the dotted attribute path: reduce chains getattr calls.
        return functools.reduce(getattr, self.attrs, module)
    except AttributeError as exc:
        raise ImportError(str(exc))
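
reduce(getattr, attrs, module) walks a dotted attribute path one attribute at a time; a minimal, verifiable sketch:

import functools
import os

# Equivalent to getattr(getattr(os, 'path'), 'join')
join = functools.reduce(getattr, ['path', 'join'], os)
assert join is os.path.join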

