Python tile() examples (source code)

theano_backend.py (project: keras-customized, author: ambrite)
def tile(x, n):
    # TODO: `keras_shape` inference.
    return T.tile(x, n)
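For reference, Theano's T.tile follows numpy.tile semantics: the input is replicated along each axis according to the reps argument (n above). A minimal sketch, assuming Theano is installed; names are illustrative:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                      # symbolic 2-D input
tiled = T.tile(x, (2, 3))              # 2 copies along rows, 3 along columns
f = theano.function([x], tiled)

a = np.arange(4, dtype=theano.config.floatX).reshape(2, 2)
print(f(a).shape)                      # (4, 6), same as np.tile(a, (2, 3)).shape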
seq2seq.py (project: world_merlin, author: pbaljeka)
def __init__(self, rng, x, d):

        self.input = x
        self.dur_input = d
        self.encoded_output = self.encode_final_state()

    ### default seq-to-seq model: tile C as input to all frames ###
seq2seq.py (project: world_merlin, author: pbaljeka)
def encode_final_state(self):
        context_vector       = self.input[-1, ]
        tiled_context_vector = T.tile(context_vector, (T.sum(self.dur_input), 1))

        return tiled_context_vector
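The method above takes the final encoder state and repeats it once per output frame (T.sum(self.dur_input) frames in total). A rough numpy illustration of the same tiling, with made-up sizes:

import numpy as np

C = np.array([0.1, 0.2, 0.3])           # final encoder state, shape (3,)
durations = np.array([2, 1, 3])         # per-unit frame counts, sum = 6
tiled = np.tile(C, (durations.sum(), 1))
print(tiled.shape)                      # (6, 3): one copy of C per decoder frame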
seq2seq.py (project: world_merlin, author: pbaljeka)
def __init__(self, rng, x, d):

        self.input = x
        self.dur_input = d
        self.encoded_output = self.encode_all_states()

    ### Distributed seq-to-seq model: tile C_1-C_n as input to corresponding decoder frames ###
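encode_all_states itself is not included in this listing. A plausible numpy sketch of the idea described by the comment, not the project's actual code: repeat each encoder state C_i by its duration d_i so every decoder frame receives the context of the unit it belongs to.

import numpy as np

states = np.random.randn(3, 4)          # C_1..C_3, one row per encoder unit
durations = np.array([2, 1, 3])         # frames per unit (hypothetical values)
expanded = np.repeat(states, durations, axis=0)
print(expanded.shape)                   # (6, 4): frame-level context vectors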
reset_layers.py (project: conv_qsar_fast, author: connorcoley)
def reset(model):
    '''Given a Keras model consisting only of GraphFP, Dense, and Dropout layers,
    this function will reset the trainable weights to save time for CV tests.'''

    for layer in model.layers:
        # Note: these are custom depending on the layer type
        if '.GraphFP' in str(layer):
            W_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))
            b_inner = np.zeros((1, layer.inner_dim))
            # Inner weights
            layer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \
                initializers.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))
            layer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval()  + \
                initializers.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))

            # Outer weights
            W_output = layer.init_output((layer.inner_dim, layer.output_dim), scale = layer.scale_output)
            b_output = np.zeros((1, layer.output_dim))
            # Initialize weights tensor
            layer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))
            layer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))
            print('graphFP layer reset')

        elif '.Dense' in str(layer):
            layer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))
            layer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))
            print('dense layer reset')

        elif '.Dropout' in str(layer):
            print('dropout unchanged')
        else:
            print('Not resetting weights for {}'.format(str(layer)))
    print('Reset model weights')
    return model
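The GraphFP branch above uses a tile-and-eval pattern: a single 2-D initial weight matrix is stacked (depth + 1) times into a 3-D tensor before the per-depth noise is added. A minimal sketch of just that pattern, assuming nothing about the GraphFP layer itself (sizes are illustrative):

import numpy as np
import theano.tensor as T

inner_dim, depth = 4, 2
W_inner = np.eye(inner_dim, dtype='float32')
stacked = T.tile(W_inner, (depth + 1, 1, 1)).eval()
print(stacked.shape)                    # (3, 4, 4): one copy per depth level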
basic.py (project: CNNbasedMedicalSegmentation, author: BRML)
def simple_upsample3d(inpt, up_factor):
    inpt = T.repeat(inpt, up_factor[0], axis=3)
    inpt = T.repeat(inpt, up_factor[1], axis=4)
    inpt = T.repeat(inpt, up_factor[2], axis=1)
    #rep = [1, up_factor[2], 1, up_factor[0], up_factor[1]]
    #inpt = T.tile(inpt, rep, ndim=5)
    return inpt
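Note the choice of T.repeat over the commented-out T.tile: repeat duplicates each element next to itself (nearest-neighbour upsampling), while tile would append whole copies of the volume. A one-line numpy comparison:

import numpy as np

a = np.array([1, 2, 3])
print(np.repeat(a, 2))   # [1 1 2 2 3 3]  -> what upsampling needs
print(np.tile(a, 2))     # [1 2 3 1 2 3]  -> block replication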
seq2seq.py (project: mimicry.ai, author: fizerkhan) contains the same three snippets shown above for world_merlin (__init__ with encode_final_state, encode_final_state, and __init__ with encode_all_states); the code is identical and is not repeated here.
lstm.py (project: DeepLearning, author: gokererdogan)
def forward_pass(self):
        def recurrence(x_t, h_tm1, c_tm1):
            i = T.nnet.sigmoid(T.dot(x_t, self.wi) + T.dot(h_tm1, self.wih) + self.bi)  # input gate
            c_proposed = T.tanh(T.dot(x_t, self.wc) + T.dot(h_tm1, self.wch) + self.bc)  # proposed memory cell content
            f = T.nnet.sigmoid(T.dot(x_t, self.wf) + T.dot(h_tm1, self.wfh) + self.bf)  # forget gate
            c_t = (T.tile(i, self.memory_size) * c_proposed) + (T.tile(f, self.memory_size) * c_tm1)  # new memory cell content
            o = T.nnet.sigmoid(T.dot(x_t, self.wo) + T.dot(h_tm1, self.woh) + self.bo)  # output gate
            h_t = T.tile(o, self.memory_size) * T.tanh(c_t)
            return [h_t, c_t]

        [h, c], _ = theano.scan(fn=recurrence, sequences=self.input,
                                outputs_info=[self.h0, self.c0], n_steps=self.input.shape[0])

        return h, c
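The T.tile(i, self.memory_size) calls suggest that each gate here is a single value shared across the whole memory vector, tiled up to memory_size before the elementwise products. A numpy sketch of one gated update under that assumption (values are illustrative):

import numpy as np

memory_size = 4
i, f = 0.9, 0.1                                  # assumed scalar input/forget gates
c_proposed = np.array([0.5, -0.5, 0.2, 0.0])
c_prev = np.ones(memory_size)
c_t = np.tile(i, memory_size) * c_proposed + np.tile(f, memory_size) * c_prev
print(c_t)                                       # elementwise gated blend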
theano_backend.py (project: reading-text-in-the-wild, author: mathDR)
def tile(x, n):
    return T.tile(x, n)
simple.py (project: pyrl, author: frsong)
def get_regs(self, states_0_, states, M):
        """
        Additional regularization terms.

        """
        regs = 0

        if self.L1_Wrec > 0:
            W = self.params['Wrec']
            regs += self.L1_Wrec * tensor.mean(abs(W))

        if self.L2_Wrec > 0:
            W = self.params['Wrec']
            regs += self.L2_Wrec * tensor.mean(tensor.sqr(W))

        #---------------------------------------------------------------------------------
        # Firing rates
        #---------------------------------------------------------------------------------

        if self.L2_r > 0:
            baseline = 0.

            M_ = (tensor.tile(M.T, (states.shape[-1], 1, 1))).T
            states_all = tensor.concatenate(
                [states_0_.reshape((1, states_0_.shape[0], states_0_.shape[1])), states],
                axis=0
                )
            r = self.f_hidden(states_all)
            regs += self.L2_r * tensor.sum(tensor.sqr(r - baseline)*M_)/tensor.sum(M_)

        #---------------------------------------------------------------------------------

        return regs
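The tensor.tile(M.T, (states.shape[-1], 1, 1)).T line expands a (time, batch) mask to (time, batch, units) so it can weight the squared firing rates elementwise. A numpy shape check of the same trick, with made-up sizes:

import numpy as np

n_steps, batch, n_units = 5, 2, 3
M = np.ones((n_steps, batch))                    # (time, batch) mask
M_ = np.tile(M.T, (n_units, 1, 1)).T
print(M_.shape)                                  # (5, 2, 3)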
conv1d.py (project: DBQA-KBQA, author: Lucien-qiang)
def convolve1d_4D(input, W, mode='full'):
    batch_size, nchannels, nwords, ndim = input.shape
    nkernels_out, nkernels_in, filter_width, ndim = W.shape
    # Unroll filter along columns
    W_unrolled = W.dimshuffle(0, 2, 1, 3).flatten(ndim=3)

    # Replicate input filters 'batch_size' times and squash out_filters along column axis.
    # W_tiled = T.tile(W_unrolled, (1, 1, batch_size)).dimshuffle(1, 0, 2).flatten(ndim=2)  # doesn't give a gradient
    W_tiled = T.alloc(W_unrolled, batch_size, W_unrolled.shape[0], W_unrolled.shape[1], W_unrolled.shape[2]).dimshuffle(1, 2, 0, 3).flatten(ndim=3).dimshuffle(1, 0, 2).flatten(ndim=2)

    # Unroll input and pad to fit the output filters.
    input_reshaped = input.dimshuffle(0, 2, 1, 3).flatten(ndim=3).dimshuffle(1,0,2).flatten(ndim=2)
    # input_tiled = T.tile(input_reshaped, (1, nkernels_out))
    input_tiled = T.alloc(input_reshaped, nkernels_out, input_reshaped.shape[0], input_reshaped.shape[1]).dimshuffle(1, 0, 2).flatten(ndim=2)

    conv_res = convolve1d_2D(input_tiled, W_tiled, mode=mode)
    if mode == 'full':
      new_shape = (nwords+filter_width-1, nkernels_out, batch_size, nkernels_in, ndim)
    elif mode == 'valid':
      new_shape = (nwords-filter_width+1, nkernels_out, batch_size, nkernels_in, ndim)

    conv_out = conv_res.reshape(new_shape).dimshuffle(2, 1, 0, 3, 4).sum(axis=3)
    return conv_out
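As the comments note, T.tile is avoided here because it broke the gradient in this setup; T.alloc is used as a substitute, broadcasting the tensor into a buffer with an extra leading axis. A minimal sketch of the alloc-as-tile trick (shapes are illustrative):

import numpy as np
import theano.tensor as T

x = np.arange(6, dtype='float32').reshape(2, 3)
k = 4
replicated = T.alloc(x, k, x.shape[0], x.shape[1]).eval()
print(replicated.shape)                 # (4, 2, 3), same values as np.tile(x, (4, 1, 1))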

##########################################
### Using einsum for 4d matrices
##########################################
conv1d.py (project: DBQA-KBQA, author: Lucien-qiang)
def convolve1d_4D_scan(input, W, mode='full'):
  batch_size, nchannels, nwords, ndim = input.shape
  nkernels_out, nkernels_in, filter_width, ndim = W.shape

  # Unroll filter along columns
  W_unrolled = W.dimshuffle(0, 2, 1, 3).flatten(ndim=3)
  # Replicate input filters 'batch_size' times and squash out_filters along column axis.
  # W_tiled = T.tile(W_unrolled, (1, 1, batch_size)).dimshuffle(1, 0, 2).flatten(ndim=2)  # doesn't give a gradient
  W_tiled = T.alloc(W_unrolled, batch_size, W_unrolled.shape[0], W_unrolled.shape[1], W_unrolled.shape[2]).dimshuffle(1, 2, 0, 3).flatten(ndim=3).dimshuffle(1, 0, 2).flatten(ndim=2)
  W_tiled = W_tiled[::-1]
  # reverse_slicing = [slice(None, None, None)] * W_tiled.ndim
  # reverse_slicing[0] = slice(None, None, -1)
  # reverse_slicing = tuple(reverse_slicing)
  # W_tiled = W_tiled[reverse_slicing]  # flip the kernel

  # Unroll input and pad to fit the output filters.
  input_reshaped = input.dimshuffle(0, 2, 1, 3).flatten(ndim=3).dimshuffle(1,0,2).flatten(ndim=2)
  # input_tiled = T.tile(input_reshaped, (1, nkernels_out))
  input_tiled = T.alloc(input_reshaped, nkernels_out, input_reshaped.shape[0], input_reshaped.shape[1]).dimshuffle(1, 0, 2).flatten(ndim=2)

  if mode == 'full':
    pad = T.zeros((filter_width-1, nkernels_out*batch_size*nchannels*ndim))
    input_padded = T.concatenate([pad, input_tiled, pad])
    conv_out, _ = theano.scan(fn=lambda i: (W_tiled * input_padded[i:i+filter_width]).sum(axis=0),
                              outputs_info=None,
                              sequences=[T.arange(0, nwords+filter_width-1)])
    new_shape = (nwords+filter_width-1, nkernels_out, batch_size, nkernels_in, ndim)
  elif mode == 'valid':
    conv_out, _ = theano.scan(fn=lambda i: (W_tiled * input_tiled[i:i+filter_width]).sum(axis=0),
                              outputs_info=None,
                              sequences=[T.arange(0, nwords-filter_width+1)])
    new_shape = (nwords-filter_width+1, nkernels_out, batch_size, nkernels_in, ndim)

  conv_reshaped = conv_out.reshape(new_shape).dimshuffle(2, 1, 0, 3, 4).sum(axis=3)
  return conv_reshaped
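In the scan variant the sliding dot product is a correlation, so the kernel is reversed along the filter-width axis (W_tiled[::-1]) to obtain a convolution. The 'full' and 'valid' output lengths match the usual 1-D convolution sizes, which a toy numpy check confirms:

import numpy as np

signal, kernel = np.ones(5), np.ones(3)
print(np.convolve(signal, kernel, mode='full').shape)   # (7,) = 5 + 3 - 1
print(np.convolve(signal, kernel, mode='valid').shape)  # (3,) = 5 - 3 + 1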
model.py (project: gogh-figure, author: joelmoniz)
def style_loss5d(self, out_layer, target_style_layer):
        # Each input is a 5D tensor: (style loss layer, batch, feature map, height, width)
        return T.mean(T.sum(T.sqr(self.batched_gram(out_layer) - T.tile(self.batched_gram(target_style_layer), (1, T.shape(out_layer)[0], 1, 1))), axis=(2,3)), axis=1)
model.py (project: gogh-figure, author: joelmoniz)
def style_loss(self, out_layer, target_style_layer):
        # Each input is a 4D tensor: (batch, feature map, height, width)
        # TODO: Make the first dim broadcastable instead of tiling
        return T.mean(T.sqr(self.batched_gram(out_layer) - T.tile(self.batched_gram(target_style_layer), (T.shape(out_layer)[0], 1, 1))))
model.py (project: gogh-figure, author: joelmoniz)
def style_loss_pg(self, out_layer, target_style_gram):
        # Each input is a 4D tensor: (batch, feature map, height, width)
        # TODO: Make the first dim broadcastable instead of tiling
        return T.mean(T.sqr(self.batched_gram(out_layer) - T.tile(target_style_gram, (T.shape(out_layer)[0], 1, 1))))
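All three style losses tile one target Gram matrix across the batch axis so it lines up with the Gram matrix of every generated image (the TODOs note that broadcasting could replace the tile). A numpy shape sketch; batched_gram is not shown in this listing, so the Gram shapes below are illustrative:

import numpy as np

batch, C = 4, 3
target_gram = np.random.randn(1, C, C)            # Gram of the style image
tiled_target = np.tile(target_gram, (batch, 1, 1))
out_gram = np.random.randn(batch, C, C)           # Grams of the generated batch
loss = np.mean((out_gram - tiled_target) ** 2)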
eval_rank.py (project: ConversationalQA, author: btjhjeon)
def build_model(tparams, options):
    """
    Construct computation graph for the whole model
    """
    # inputs (image, sentence, contrast images, constrast sentences)
    im = tensor.matrix('im', dtype='float32')
    s = tensor.matrix('s', dtype='float32')
    cim = tensor.matrix('cim', dtype='float32')
    cs = tensor.matrix('cs', dtype='float32')

    # image embedding
    lim = get_layer('ff')[1](tparams, im, options, prefix='ff_im', activ='linear')
    lcim = get_layer('ff')[1](tparams, cim, options, prefix='ff_im', activ='linear')

    # sentence embedding
    ls = get_layer('ff')[1](tparams, s, options, prefix='ff_s', activ='linear')
    lcs = get_layer('ff')[1](tparams, cs, options, prefix='ff_s', activ='linear')

    # L2 norm for sentences
    ls = l2norm(ls)
    lcs = l2norm(lcs)

    # Tile by number of contrast terms
    lim = tensor.tile(lim, (options['ncon'], 1))
    ls = tensor.tile(ls, (options['ncon'], 1))

    # pairwise ranking loss
    cost_im = options['margin'] - (lim * ls).sum(axis=1) + (lim * lcs).sum(axis=1)
    cost_im = cost_im * (cost_im > 0.)
    cost_im = cost_im.sum(0)

    cost_s = options['margin'] - (ls * lim).sum(axis=1) + (ls * lcim).sum(axis=1)
    cost_s = cost_s * (cost_s > 0.)
    cost_s = cost_s.sum(0)

    cost = cost_im + cost_s
    return [im, s, cim, cs], cost
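The two tensor.tile(..., (options['ncon'], 1)) calls repeat the positive image and sentence embeddings once per contrastive example so the hinge terms can be computed row by row. A numpy sketch of the image-side cost with a single positive pair (shapes and margin are illustrative):

import numpy as np

ncon, dim, margin = 3, 4, 0.2
lim = np.random.randn(1, dim)                 # one positive image embedding
ls = np.random.randn(1, dim)                  # its matching sentence embedding
lcs = np.random.randn(ncon, dim)              # ncon contrastive sentences
lim_t = np.tile(lim, (ncon, 1))               # (ncon, dim)
ls_t = np.tile(ls, (ncon, 1))
cost_im = margin - (lim_t * ls_t).sum(axis=1) + (lim_t * lcs).sum(axis=1)
cost_im = (cost_im * (cost_im > 0.)).sum()    # hinge, summed over contrasts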

# build an encoder
conv1d.py (project: DEEP-CLICK-MODEL, author: THUIR) contains the same convolve1d_4D and convolve1d_4D_scan functions as the DBQA-KBQA example above; the code is identical and is not repeated here.

