Python merge(): example source code

config.py (project: call_map, author: nccgroup)
def get_config(self):
        if self._cache is None:
            read_result = self.read_user_config()
            return tz.merge(self.default_user_config,
                            read_result,
                            self.session_overrides)
        else:
            return self._cache
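
Note: a minimal sketch of how tz.merge resolves the three dictionaries above, assuming tz is the toolz module (the keys below are illustrative, not call_map's actual settings). When the same key appears in more than one argument, the right-most dictionary wins, so the session overrides take precedence over the user config, which takes precedence over the defaults.

import toolz as tz

defaults = {'theme': 'light', 'font_size': 12}
user_config = {'font_size': 14}
session_overrides = {'theme': 'dark'}

# Right-most dictionary wins on key conflicts.
config = tz.merge(defaults, user_config, session_overrides)
print(config)  # {'theme': 'dark', 'font_size': 14}
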
gui.py (project: call_map, author: nccgroup)
def configure_role_markers(cls, want_unicode_role_markers):
        if want_unicode_role_markers:
            cls.role_markers = tz.merge(cls.default_role_markers, {'definition': '?', 'parent': '?'})
        else:
            cls.role_markers = cls.default_role_markers
encoder.py (project: sgnmt, author: ucam-smt)
def apply(self, source_sentence, source_sentence_mask):
        """Produces source annotations, either non-recurrently or with
        a bidirectional RNN architecture.
        """
        # Time as first dimension
        source_sentence = source_sentence.T
        source_sentence_mask = source_sentence_mask.T

        embeddings = self.lookup.apply(source_sentence)

        if self.n_layers >= 1:
            representation = self.bidir.apply(
                merge(self.fwd_fork.apply(embeddings, as_dict=True),
                      {'mask': source_sentence_mask}),
                merge(self.back_fork.apply(embeddings, as_dict=True),
                      {'mask': source_sentence_mask})
            )
            for _ in xrange(self.n_layers-1):
                if self.skip_connections:
                    inp = tensor.concatenate([representation, embeddings],
                                             axis=2)
                else:
                    inp = representation
                representation = self.bidir.apply(
                    merge(self.mid_fwd_fork.apply(inp, as_dict=True),
                          {'mask': source_sentence_mask}),
                    merge(self.mid_back_fork.apply(inp, as_dict=True),
                          {'mask': source_sentence_mask})
                )
        else:
            representation = embeddings
        return representation, source_sentence_mask
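
Note: the merge calls above combine the dictionary returned by Fork.apply(..., as_dict=True) with the mask into a single keyword-argument dict for the bidirectional transition. Below is a minimal sketch of that pattern with plain Python dicts, assuming merge here is toolz.merge as is common in Blocks-based code; the keys and string values are placeholders, not the brick's real sequence names.

from toolz import merge

# Stand-in for Fork.apply(embeddings, as_dict=True): one entry per named
# sequence input of the recurrent transition.
fork_outputs = {'inputs': 'W_in @ embeddings', 'gate_inputs': 'W_gate @ embeddings'}

# Merging in the mask yields one kwargs dict, ready to be unpacked as
# bidir.apply(**kwargs).
kwargs = merge(fork_outputs, {'mask': 'source_sentence_mask'})
print(sorted(kwargs))  # ['gate_inputs', 'inputs', 'mask']
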
encoder.py (project: sgnmt, author: ucam-smt)
def apply(self, source_sentence, source_sentence_mask):
        """Produces source annotations, either non-recurrently or with
        a bidirectional RNN architecture.
        """
        # Time as first dimension
        source_sentence = source_sentence.T
        source_sentence_mask = source_sentence_mask.T
        embeddings = self.lookup.apply(source_sentence)
        representation = self.bidirs[0].apply(
                merge(self.fwd_forks[0].apply(embeddings, as_dict=True),
                      {'mask': source_sentence_mask}),
                merge(self.back_forks[0].apply(embeddings, as_dict=True),
                      {'mask': source_sentence_mask}))
        for i in xrange(1, self.n_layers):
            if self.skip_connections:
                inp = tensor.concatenate([representation, embeddings],
                                         axis=2)
            else:
                inp = representation
            representation = self.bidirs[i].apply(
                merge(self.fwd_forks[i].apply(inp, as_dict=True),
                      {'mask': source_sentence_mask}),
                merge(self.back_forks[i].apply(inp, as_dict=True),
                      {'mask': source_sentence_mask})
            )
        return representation, source_sentence_mask
encoder.py (project: sgnmt, author: ucam-smt)
def apply(self, base_annotations, base_mask):
        ann_representation = self.transition.apply(
            **merge(self.rnn_inputs, {
                'mask': base_mask,
                'attended': base_annotations,
                'attended_mask': base_mask}))[0]
        return ann_representation, base_mask
model.py (project: DCNMT, author: SwordYork)
def apply(self, char_seq, sample_matrix, char_aux):
        # Time as first dimension
        embeddings = self.lookup.apply(char_seq)
        gru_out = self.dgru.apply(
            **merge(self.gru_fork.apply(embeddings, as_dict=True),
                    {'mask': char_aux}))
        wgru_out = tensor.exp(self.wl.apply(self.bidir_w.apply(embeddings, char_aux)))

        if self.dgru_depth > 1:
            gru_out = gru_out[-1]

        gru_out = tensor.addbroadcast(wgru_out, 2) * gru_out
        sampled_representation = tensor.tanh(tensor.batched_dot(sample_matrix, gru_out.dimshuffle([1, 0, 2])))
        return sampled_representation.dimshuffle([1, 0, 2]), wgru_out
model.py (project: DCNMT, author: SwordYork)
def apply(self, char_seq, sample_matrix, char_aux):
        # Time as first dimension
        embeddings = self.lookup.apply(char_seq)
        gru_out = self.dgru.apply(
            **merge(self.gru_fork.apply(embeddings, as_dict=True),
                    {'mask': char_aux}))
        if self.dgru_depth > 1:
            gru_out = gru_out[-1]
        sampled_representation = tensor.batched_dot(sample_matrix, gru_out.dimshuffle([1, 0, 2]))
        return sampled_representation.dimshuffle([1, 0, 2])
model.py (project: DCNMT, author: SwordYork)
def single_emit(self, target_single_char, batch_size, mask, states=None):
        # Time as first dimension
        # only one batch
        embeddings = self.lookup.apply(target_single_char)
        if states is None:
            states = self.dgru.initial_states(batch_size)
        states_dict = {'states': states[0]}
        for i in range(1, self.dgru_depth):
            states_dict['states' + RECURRENTSTACK_SEPARATOR + str(i)] = states[i]
        gru_out = self.dgru.apply(**merge(self.gru_fork.apply(embeddings, as_dict=True), states_dict,
                                          {'mask': mask, 'iterate': False}))
        return gru_out
model.py (project: DCNMT, author: SwordYork)
def __init__(self, vocab_size, embedding_dim, igru_state_dim, igru_depth, trg_dgru_depth, emitter,
                 feedback_brick, merge=None, merge_prototype=None, post_merge=None, **kwargs):
        merged_dim = igru_state_dim
        if not merge:
            merge = Merge(input_names=kwargs['source_names'],
                          prototype=merge_prototype)
        if not post_merge:
            post_merge = Bias(dim=merged_dim)

        # for compatibility
        if igru_depth == 1:
            self.igru = IGRU(dim=igru_state_dim)
        else:
            self.igru = RecurrentStack([IGRU(dim=igru_state_dim, name='igru')] +
                                       [UpperIGRU(dim=igru_state_dim, activation=Tanh(), name='upper_igru' + str(i))
                                        for i in range(1, igru_depth)],
                                       skip_connections=True)
        self.embedding_dim = embedding_dim
        self.emitter = emitter
        self.feedback_brick = feedback_brick
        self.merge = merge
        self.post_merge = post_merge
        self.merged_dim = merged_dim
        self.igru_depth = igru_depth
        self.trg_dgru_depth = trg_dgru_depth
        self.lookup = LookupTable(name='embeddings')
        self.vocab_size = vocab_size
        self.igru_state_dim = igru_state_dim
        self.gru_to_softmax = Linear(input_dim=igru_state_dim, output_dim=vocab_size)
        self.gru_fork = Fork([name for name in self.igru.apply.sequences
                              if name != 'mask' and name != 'input_states'], prototype=Linear(), name='gru_fork')

        children = [self.emitter, self.feedback_brick, self.merge, self.post_merge,
                    self.igru, self.lookup, self.gru_to_softmax, self.gru_fork]
        kwargs.setdefault('children', []).extend(children)
        super(Interpolator, self).__init__(**kwargs)
model.py (project: DCNMT, author: SwordYork)
def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim
        self.emitter.readout_dim = self.get_dim('readouts')
        self.merge.input_names = self.source_names
        self.merge.input_dims = self.source_dims
        self.merge.output_dim = self.merged_dim
        self.post_merge.input_dim = self.merged_dim
        self.post_merge.output_dim = self.igru_state_dim
        self.gru_fork.input_dim = self.embedding_dim
        self.gru_fork.output_dims = [self.igru.get_dim(name)
                                     for name in self.gru_fork.output_names]
model.py (project: DCNMT, author: SwordYork)
def readout(self, **kwargs):
        merged = self.merge.apply(**{name: kwargs[name]
                                     for name in self.merge.input_names})
        merged = self.post_merge.apply(merged)
        return merged
model.py (project: DCNMT, author: SwordYork)
def readout_gru(self, target_prev_char_seq, target_prev_char_aux, input_states):
        embeddings = self.lookup.apply(target_prev_char_seq)
        gru_out = self.igru.apply(
            **merge(self.gru_fork.apply(embeddings, as_dict=True),
                    {'mask': target_prev_char_aux, 'input_states': input_states}))
        if self.igru_depth > 1:
            gru_out = gru_out[-1]
        readout_chars = self.gru_to_softmax.apply(gru_out)
        return readout_chars
extensions.py (project: dl4mt-multi-src, author: nyu-dl)
def do(self, which_callback, *args):
        iterations_done = self.main_loop.status['iterations_done']
        if self.burnin <= iterations_done:
            # Save the model here
            iterations_done = self.main_loop.status['iterations_done']
            filename = os.path.join(
                self.saveto, 'params_iter{}.npz'.format(iterations_done))
            s = signal.signal(signal.SIGINT, signal.SIG_IGN)
            logger.info(" Incremental dump {}".format(filename))
            params_to_save = []
            for cg_name in self.main_loop.models.keys():
                params_to_save.append(
                    self.main_loop.models[cg_name].get_param_values())
            params_to_save = merge(params_to_save)
            secure_numpy_save(params_to_save, filename)
            if self.save_iter_state:
                filename_is = os.path.join(
                    self.saveto,
                    'iterations_state_iter{}.pkl'.format(iterations_done))
                logger.info(" Incremental dump {}".format(filename_is))
                secure_pickle_dump(self.main_loop.iteration_state, filename_is)
            if self.save_log:
                filename_log = os.path.join(
                    self.saveto,
                    'log_iter{}'.format(iterations_done))
                logger.info(" Incremental dump {}".format(filename_log))
                secure_pickle_dump(self.main_loop.log, filename_log)
            signal.signal(signal.SIGINT, s)
extensions.py (project: dl4mt-multi-src, author: nyu-dl)
def dump_parameters(self, main_loop):
        params_to_save = []
        for model in main_loop.models.values():
            params_to_save.append(model.get_param_values())
        secure_numpy_save(merge(params_to_save),
                          self.path_to_parameters)
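
Note: here merge receives a single list of per-model parameter dictionaries rather than several dict arguments; toolz.merge accepts either form and flattens the collection into one dict. A small sketch with made-up parameter names:

from toolz import merge

params_per_model = [{'enc/W': 0.1, 'enc/b': 0.0}, {'dec/W': 0.2}]

# A single collection of dicts behaves like merge(d1, d2, ...);
# later dicts win on duplicate keys.
all_params = merge(params_per_model)
print(all_params)  # {'enc/W': 0.1, 'enc/b': 0.0, 'dec/W': 0.2}
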
models.py (project: dl4mt-multi-src, author: nyu-dl)
def get_params(self):
        return merge(self.encoder.get_params(),
                     self.decoder.get_params())
debug.py (project: amino, author: tek)
def __call(self, a, kw):
        sub_a, rest = self.__substitute__(self.__args, List.wrap(a))
        sub_kw = merge(self.__kwargs, kw)
        return self.__func(*sub_a, **sub_kw), rest
conftest.py (project: provenance, author: bmabey)
def artifact_record(**kargs):
    artifact_props = t.merge({k: None for k in  pc.artifact_properties},
                             _artifact_record_st.example(),
                             {'inputs': {'varargs':[1,2,3], 'kargs': {}},
                              'fn_module': 'foo', 'fn_name': 'bar',
                              'value': 55, 'name': 'bar',
                              'version': 0,
                              'serializer': 'joblib',
                              'run_info': pc.run_info()},
                             kargs)
    return pc.ArtifactRecord(**artifact_props)
repos.py (project: provenance, author: bmabey)
def __repr__(self):
        return "lazy_dict({})".format(
            t.merge(t.valmap(lambda _: "...", self.thunks), self.realized))
repos.py (project: provenance, author: bmabey)
def lazy_proxy_dict(artifacts_or_ids, group_artifacts_of_same_name=False):
    """
    Takes a list of artifacts or artifact ids and returns a dictionary whose
    keys are the names of the artifacts. The values will be lazily loaded into
    proxies as requested.

    Parameters
    ----------
    artifacts_or_ids : collection of artifacts or artifact ids (strings)

    group_artifacts_of_same_name : bool (default: False)
        If True, artifacts that share a name are grouped together into one
        list under that name. If False, an exception is raised when several
        artifacts share the same name.
    """
    if isinstance(artifacts_or_ids, dict):
        artifacts = t.valmap(coerce_to_artifact, artifacts_or_ids)
        lambdas = {name: (lambda a: lambda: a.proxy())(a)
                   for name, a in artifacts.items()}
        return lazy_dict(lambdas)

    # else we have a collection
    artifacts = coerce_to_artifacts(artifacts_or_ids)
    by_name = t.groupby(lambda a: a.name, artifacts)
    singles = t.valfilter(lambda l: len(l) == 1, by_name)
    multi = t.valfilter(lambda l: len(l) > 1, by_name)

    lambdas = {name: (lambda a: lambda: a.proxy())(a[0]) for name, a in singles.items()}

    if group_artifacts_of_same_name and len(multi) > 0:
        lambdas = t.merge(lambdas,
                          {name:
                           (lambda artifacts: (lambda: [a.proxy() for a in artifacts]))(artifacts)
                           for name, artifacts in multi.items()})

    if not group_artifacts_of_same_name and len(multi) > 0:
        raise ValueError("""Only artifacts with distinct names can be used in a lazy_proxy_dict.
Offending names: {}
Use the option `group_artifacts_of_same_name=True` if you want a list of proxies to be returned under the respective key.
        """.format({n: len(a) for n, a in multi.items()}))

    return lazy_dict(lambdas)
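
Note: the nested (lambda a: lambda: a.proxy())(a) construction above works around Python's late-binding closures: a plain lambda inside the comprehension would capture the loop variable itself, so every thunk would end up calling .proxy() on the last artifact. A self-contained sketch of the same idiom, with strings standing in for artifacts:

items = {'x': 'foo', 'y': 'bar'}

# Naive version: every thunk closes over the loop variable and therefore
# sees its final value.
naive = {name: (lambda: a.upper()) for name, a in items.items()}
print(naive['x']())  # 'BAR'

# Immediately applying an outer lambda binds the current value of `a`.
fixed = {name: (lambda a: lambda: a.upper())(a) for name, a in items.items()}
print(fixed['x']())  # 'FOO'
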
keras.py (project: provenance, author: bmabey)
def register_custom_objects(mapping, merge=False):
    # Rebind the module-level registry; without `global` the assignment
    # below would only create a local name (and the read under merge=True
    # would raise UnboundLocalError).
    global REGISTERED_CUSTOM_OBJECTS
    if merge:
        res = t.merge(REGISTERED_CUSTOM_OBJECTS, mapping)
    else:
        res = mapping

    REGISTERED_CUSTOM_OBJECTS = res



#TODO: move custom_objects into the attrs
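
Note: a self-contained usage sketch of the registry pattern above, assuming REGISTERED_CUSTOM_OBJECTS is a module-level dict and t is toolz; the registered names are hypothetical.

import toolz as t

REGISTERED_CUSTOM_OBJECTS = {}

def register_custom_objects(mapping, merge=False):
    global REGISTERED_CUSTOM_OBJECTS
    REGISTERED_CUSTOM_OBJECTS = t.merge(REGISTERED_CUSTOM_OBJECTS, mapping) if merge else mapping

register_custom_objects({'my_block': object})               # replaces the registry
register_custom_objects({'my_layer': object}, merge=True)   # extends it
print(sorted(REGISTERED_CUSTOM_OBJECTS))                    # ['my_block', 'my_layer']
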

