Source code examples of Python's split()

sentiment.py (project: fold, author: tensorflow)
def load_embeddings(filename):
  """Loads embedings, returns weight matrix and dict from words to indices."""
  weight_vectors = []
  word_idx = {}
  with codecs.open(filename, encoding='utf-8') as f:
    for line in f:
      word, vec = line.split(u' ', 1)
      word_idx[word] = len(weight_vectors)
      weight_vectors.append(np.array(vec.split(), dtype=np.float32))
  # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and
  # '-RRB-' respectively in the parse-trees.
  word_idx[u'-LRB-'] = word_idx.pop(u'(')
  word_idx[u'-RRB-'] = word_idx.pop(u')')
  # Random embedding vector for unknown words.
  weight_vectors.append(np.random.uniform(
      -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))
  return np.stack(weight_vectors), word_idx
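A minimal usage sketch for the loader above. The path is hypothetical; codecs and numpy as np are imported at module level in the original project. The file is expected to hold one "word v1 v2 ... vN" entry per line, including entries for '(' and ')', which the function remaps to -LRB-/-RRB-.

import codecs
import numpy as np

embedding_matrix, word_idx = load_embeddings('filtered_glove.txt')  # hypothetical file
print(embedding_matrix.shape)   # (vocab_size + 1, dim); the extra row is the random unknown-word vector
print(word_idx[u'-LRB-'])       # index of the remapped '(' token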
sentiment.py (project: fold, author: tensorflow)
def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
      lhs, rhs = state
      c0, h0 = lhs
      c1, h1 = rhs
      concat = tf.contrib.layers.linear(
          tf.concat([inputs, h0, h1], 1), 5 * self._num_units)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f0, f1, o = tf.split(value=concat, num_or_size_splits=5, axis=1)

      j = self._activation(j)
      if not isinstance(self._keep_prob, float) or self._keep_prob < 1:
        j = tf.nn.dropout(j, self._keep_prob, seed=self._seed)

      new_c = (c0 * tf.sigmoid(f0 + self._forget_bias) +
               c1 * tf.sigmoid(f1 + self._forget_bias) +
               tf.sigmoid(i) * j)
      new_h = self._activation(new_c) * tf.sigmoid(o)

      new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)

      return new_h, new_state
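Most snippets on this page use the TensorFlow 1.x signature tf.split(value, num_or_size_splits, axis), as in the five-way gate split above. A minimal standalone sketch of both forms (even split and explicit sizes):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 10])
a, b = tf.split(x, num_or_size_splits=2, axis=1)   # two [None, 5] pieces
u, v, w = tf.split(x, [3, 3, 4], axis=1)           # pieces of width 3, 3 and 4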
seq2seq.py (project: basic-encoder-decoder, author: pemywei)
def test(self, sess, token_ids):
        # We decode one sentence at a time.
        token_ids = data_utils.padding(token_ids)
        target_ids = data_utils.padding([data_utils.GO_ID])
        y_ids = data_utils.padding([data_utils.EOS_ID])
        encoder_inputs, decoder_inputs, _, _ = data_utils.nextRandomBatch([(token_ids, target_ids, y_ids)], batch_size=1)
        prediction = sess.run(self.prediction, feed_dict={
            self.encoder_inputs: encoder_inputs,
            self.decoder_inputs: decoder_inputs
        })
        # prediction is a NumPy array returned by sess.run, so take the argmax
        # with NumPy instead of building a new TF op on every call.
        pred_max = np.argmax(prediction, 1)
        # prediction = tf.split(0, self.num_steps, prediction)
        # # This is a greedy decoder - outputs are just argmaxes of output_logits.
        # outputs = [int(np.argmax(predict)) for predict in prediction]
        # # If there is an EOS symbol in outputs, cut them at that point.
        # if data_utils.EOS_ID in outputs:
        #     outputs = outputs[:outputs.index(data_utils.EOS_ID)]
        return pred_max
answer_layer.py (project: jack, author: uclmr)
def bilinear_answer_layer(size, encoded_question, question_length, encoded_support, support_length,
                          support2question, answer2support, is_eval, beam_size=1,
                          max_span_size=10000):
    """Answer layer for multiple paragraph QA."""
    # computing single time attention over question
    size = encoded_support.get_shape()[-1].value
    question_state = compute_question_state(encoded_question, question_length)

    # compute logits
    hidden = tf.gather(tf.layers.dense(question_state, 2 * size, name="hidden"), support2question)
    hidden_start, hidden_end = tf.split(hidden, 2, 1)

    support_mask = misc.mask_for_lengths(support_length)

    start_scores = tf.einsum('ik,ijk->ij', hidden_start, encoded_support)
    start_scores = start_scores + support_mask

    end_scores = tf.einsum('ik,ijk->ij', hidden_end, encoded_support)
    end_scores = end_scores + support_mask

    return compute_spans(start_scores, end_scores, answer2support, is_eval, support2question,
                         beam_size, max_span_size)
rnn.py (project: jack, author: uclmr)
def __call__(self, inputs, initial_state=None, dtype=tf.float32, sequence_length=None, scope=None):
        num_gates = 3 if self._with_residual else 2
        transformed = tf.layers.dense(inputs, num_gates * self._num_units,
                                      bias_initializer=tf.constant_initializer(self._constant_bias))

        gates = tf.split(transformed, num_gates, axis=2)
        forget_gate = tf.sigmoid(gates[1])
        transformed_inputs = (1.0 - forget_gate) * gates[0]
        if self._with_residual:
            residual_gate = tf.sigmoid(gates[2])
            inputs *= (1.0 - residual_gate)
            new_inputs = tf.concat([inputs, transformed_inputs, forget_gate, residual_gate], axis=2)
        else:
            new_inputs = tf.concat([transformed_inputs, forget_gate], axis=2)

        return self._rnn(new_inputs, initial_state, dtype, sequence_length, scope)
nn.py (project: mnist_LeNet, author: LuxxxLucy)
def gated_resnet(x, a=None, h=None, nonlinearity=concat_elu, conv=conv2d, init=False, counters={}, ema=None, dropout_p=0., **kwargs):
    xs = int_shape(x)
    num_filters = xs[-1]

    c1 = conv(nonlinearity(x), num_filters)
    if a is not None:  # add short-cut connection if auxiliary input 'a' is given
        c1 += nin(nonlinearity(a), num_filters)
    c1 = nonlinearity(c1)
    if dropout_p > 0:
        c1 = tf.nn.dropout(c1, keep_prob=1. - dropout_p)
    c2 = conv(c1, num_filters * 2, init_scale=0.1)

    # add projection of h vector if included: conditional generation
    if h is not None:
        with tf.variable_scope(get_name('conditional_weights', counters)):
            hw = get_var_maybe_avg('hw', ema, shape=[int_shape(h)[-1], 2 * num_filters], dtype=tf.float32,
                                   initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
        if init:
            hw = hw.initialized_value()
        c2 += tf.reshape(tf.matmul(h, hw), [xs[0], 1, 1, 2 * num_filters])

    # tf.split(value, num_or_size_splits, axis): split c2 into two halves along axis 3 (channels)
    a, b = tf.split(c2, 2, 3)
    c3 = a * tf.nn.sigmoid(b)
    return x + c3
rnn_cell.py (project: qrn, author: uwnlp)
def pre(self, inputs, scope=None):
        """Preprocess inputs to be used by the cell. Assumes [N, J, *]
        [x, u]"""
        is_train = self._is_train
        keep_prob = self._keep_prob
        gate_size = self._gate_size
        with tf.variable_scope(scope or "pre"):
            x, u, _, _ = tf.split(2, 4, tf.slice(inputs, [0, 0, gate_size], [-1, -1, -1]))  # [N, J, d]
            a_raw = linear([x * u], gate_size, True, scope='a_raw', var_on_cpu=self._var_on_cpu,
                           wd=self._wd, initializer=self._initializer)
            a = tf.sigmoid(a_raw - self._forget_bias, name='a')
            if keep_prob < 1.0:
                x = tf.cond(is_train, lambda: tf.nn.dropout(x, keep_prob), lambda: x)
                u = tf.cond(is_train, lambda: tf.nn.dropout(u, keep_prob), lambda: u)
            v_t = tf.nn.tanh(linear([x, u], self._num_units, True,
                             var_on_cpu=self._var_on_cpu, wd=self._wd, scope='v_raw'), name='v')
            new_inputs = tf.concat(2, [a, x, u, v_t])  # [N, J, 3*d + 1]
        return new_inputs
rnn_cell.py (project: qrn, author: uwnlp)
def __call__(self, inputs, state, scope=None):
        gate_size = self._gate_size
        with tf.variable_scope(scope or type(self).__name__):  # "RSMCell"
            with tf.name_scope("Split"):  # Reset gate and update gate.
                a = tf.slice(inputs, [0, 0], [-1, gate_size])
                x, u, v_t = tf.split(1, 3, tf.slice(inputs, [0, gate_size], [-1, -1]))
                o = tf.slice(state, [0, 0], [-1, 1])
                h, v = tf.split(1, 2, tf.slice(state, [0, gate_size], [-1, -1]))

            with tf.variable_scope("Main"):
                r_raw = linear([x * u], 1, True, scope='r_raw', var_on_cpu=self._var_on_cpu,
                               initializer=self._initializer)
                r = tf.sigmoid(r_raw, name='a')
                new_o = a * r + (1 - a) * o
                new_v = a * v_t + (1 - a) * v
                g = r * v_t
                new_h = a * g + (1 - a) * h

            with tf.name_scope("Concat"):
                new_state = tf.concat(1, [new_o, new_h, new_v])
                outputs = tf.concat(1, [a, r, x, new_h, new_v, g])

        return outputs, new_state
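Note that the two qrn snippets above use the pre-1.0 TensorFlow argument order, tf.split(axis, num_splits, value) and tf.concat(axis, values). A minimal sketch of the equivalent calls under the TF 1.x API used in most of the other snippets (shapes are hypothetical):

import tensorflow as tf

inputs = tf.zeros([4, 7, 12])                        # [N, J, 3 * d]
# Pre-1.0 argument order:  tf.split(2, 3, inputs)  /  tf.concat(2, [x, u, v])
x, u, v = tf.split(inputs, num_or_size_splits=3, axis=2)
new_inputs = tf.concat([x, u, v], axis=2)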
tensorflow_backend.py (project: keras, author: GeekLiB)
def batch_set_value(tuples):
    '''Sets the values of many tensor variables at once.

    # Arguments
        tuples: a list of tuples `(tensor, value)`.
            `value` should be a Numpy array.
    '''
    if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
            value = np.asarray(value)
            tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
            if hasattr(x, '_assign_placeholder'):
                assign_placeholder = x._assign_placeholder
                assign_op = x._assign_op
            else:
                assign_placeholder = tf.placeholder(tf_dtype, shape=value.shape)
                assign_op = x.assign(assign_placeholder)
                x._assign_placeholder = assign_placeholder
                x._assign_op = assign_op
            assign_ops.append(assign_op)
            feed_dict[assign_placeholder] = value
        get_session().run(assign_ops, feed_dict=feed_dict)
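A minimal usage sketch for batch_set_value; the variables are hypothetical, and get_session as well as _convert_string_dtype come from the same Keras backend module.

import numpy as np
import tensorflow as tf

w = tf.Variable(np.zeros((2, 3), dtype=np.float32))
b = tf.Variable(np.zeros((3,), dtype=np.float32))
# Push new values into both variables with a single session.run call.
batch_set_value([(w, np.ones((2, 3), dtype=np.float32)),
                 (b, np.full((3,), 0.1, dtype=np.float32))])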
layers.py (project: third_person_im, author: bstadie)
def step(self, hprev, x):
        if self.layer_normalization:
            ln = apply_ln(self)
            x_ru = ln(tf.matmul(x, self.W_x_ru), "x_ru")
            h_ru = ln(tf.matmul(hprev, self.W_h_ru), "h_ru")
            x_r, x_u = tf.split(split_dim=1, num_split=2, value=x_ru)
            h_r, h_u = tf.split(split_dim=1, num_split=2, value=h_ru)
            x_c = ln(tf.matmul(x, self.W_xc), "x_c")
            h_c = ln(tf.matmul(hprev, self.W_hc), "h_c")
            r = self.gate_nonlinearity(x_r + h_r)
            u = self.gate_nonlinearity(x_u + h_u)
            c = self.nonlinearity(x_c + r * h_c)
            h = (1 - u) * hprev + u * c
            return h
        else:
            xb_ruc = tf.matmul(x, self.W_x_ruc) + tf.reshape(self.b_ruc, (1, -1))
            h_ruc = tf.matmul(hprev, self.W_h_ruc)
            xb_r, xb_u, xb_c = tf.split(split_dim=1, num_split=3, value=xb_ruc)
            h_r, h_u, h_c = tf.split(split_dim=1, num_split=3, value=h_ruc)
            r = self.gate_nonlinearity(xb_r + h_r)
            u = self.gate_nonlinearity(xb_u + h_u)
            c = self.nonlinearity(xb_c + r * h_c)
            h = (1 - u) * hprev + u * c
            return h
resnet.py (project: blitznet, author: dvornikita)
def create_trunk(self, images):
        red, green, blue = tf.split(images*255, 3, axis=3)
        images = tf.concat([blue, green, red], 3) - MEAN_COLOR

        with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training=self.training,
                                                       weight_decay=self.weight_decay,
                                                       batch_norm_decay=args.bn_decay)):
            blocks = [
                resnet_utils.Block(
                    'block1', bottleneck, [(256, 64, 1)] * 3),
                resnet_utils.Block(
                    'block2', bottleneck, [(512, 128, 2)] + [(512, 128, 1)] * 3),
                resnet_utils.Block(
                    'block3', bottleneck, [(1024, 256, 2)] + [(1024, 256, 1)] * self.num_block3),
                resnet_utils.Block(
                    'block4', bottleneck, [(2048, 512, 2)] + [(2048, 512, 1)] * 2)
            ]

            net, endpoints = resnet_v1.resnet_v1(images, blocks,
                                                 global_pool=False,
                                                 reuse=self.reuse,
                                                 scope=self.scope)
            self.outputs = endpoints
        self.add_extra_layers(net)
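The split/concat at the top of create_trunk converts RGB input in [0, 1] to mean-subtracted BGR, as expected by Caffe-trained ResNet weights. A standalone sketch of just that preprocessing step; the MEAN_COLOR value here is an assumption, not necessarily the constant defined in blitznet.

import tensorflow as tf

MEAN_COLOR = [103.939, 116.779, 123.68]                      # hypothetical BGR mean
images = tf.placeholder(tf.float32, [None, None, None, 3])   # RGB, values in [0, 1]
red, green, blue = tf.split(images * 255, 3, axis=3)
bgr = tf.concat([blue, green, red], 3) - MEAN_COLOR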
utils.py (project: baselines, author: openai)
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
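A minimal usage sketch for the mask-aware LSTM above. Sizes are hypothetical, and ortho_init comes from the same baselines utils module.

import tensorflow as tf

nbatch, nin, nh, nsteps = 4, 8, 16, 5
xs = [tf.placeholder(tf.float32, [nbatch, nin]) for _ in range(nsteps)]   # one tensor per step
ms = [tf.placeholder(tf.float32, [nbatch, 1]) for _ in range(nsteps)]     # episode-reset masks
s = tf.placeholder(tf.float32, [nbatch, 2 * nh])                          # concatenated (c, h) state
hs, s_new = lstm(xs, ms, s, scope='lstm1', nh=nh)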
lstm_models.py (project: magenta, author: tensorflow)
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    split_x_target = tf.split(flat_x_target, self._output_depths, axis=-1)
    split_rnn_output = tf.split(
        flat_rnn_output, self._output_depths, axis=-1)

    losses = []
    truths = []
    predictions = []
    metric_map = {}
    for i in range(len(self._output_depths)):
      l, m, t, p = (
          super(MultiOutCategoricalLstmDecoder, self)._flat_reconstruction_loss(
              split_x_target[i], split_rnn_output[i]))
      losses.append(l)
      truths.append(t)
      predictions.append(p)
      for k, v in m.items():
        metric_map['%s_%d' % (k, i)] = v

    return (tf.reduce_sum(losses, axis=0),
            metric_map,
            tf.stack(truths),
            tf.stack(predictions))
bbox_transform_tf.py (project: luminoth, author: tryolabs)
def decode(roi, deltas):
    with tf.name_scope('BoundingBoxTransform/decode'):
        (roi_width, roi_height,
         roi_urx, roi_ury) = get_width_upright(roi)

        dx, dy, dw, dh = tf.split(deltas, 4, axis=1)

        pred_ur_x = dx * roi_width + roi_urx
        pred_ur_y = dy * roi_height + roi_ury
        pred_w = tf.exp(dw) * roi_width
        pred_h = tf.exp(dh) * roi_height

        bbox_x1 = pred_ur_x - 0.5 * pred_w
        bbox_y1 = pred_ur_y - 0.5 * pred_h

        # This -1. extra is different from reference implementation.
        bbox_x2 = pred_ur_x + 0.5 * pred_w - 1.
        bbox_y2 = pred_ur_y + 0.5 * pred_h - 1.

        bboxes = tf.concat(
            [bbox_x1, bbox_y1, bbox_x2, bbox_y2], axis=1)

        return bboxes
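A minimal usage sketch for decode; the proposal and deltas below are hypothetical, and get_width_upright is defined in the same luminoth module.

import tensorflow as tf

roi = tf.constant([[10., 10., 50., 90.]])       # one [x1, y1, x2, y2] proposal
deltas = tf.constant([[0.1, -0.2, 0.05, 0.0]])  # predicted regression deltas
bboxes = decode(roi, deltas)                    # refined [x1, y1, x2, y2] boxes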
semisupervised.py (project: TensorFlow-VAE, author: dancsalo)
def _decoder(self, z):
        """Define p(x|z) network"""
        if z is None:
            mean = None
            stddev = None
            logits = None
            class_predictions = None
            input_sample = self.epsilon
        else:
            z = tf.reshape(z, [-1, self.flags['hidden_size'] * 2])
            mean, stddev = tf.split(1, 2, z)  # Compute latent variables (z) by calculating mean, stddev
            stddev = tf.sqrt(tf.exp(stddev))
            mlp = Layers(mean)
            mlp.fc(self.flags['num_classes'])
            class_predictions = mlp.get_output()
            logits = tf.nn.softmax(class_predictions)
            input_sample = mean + self.epsilon * stddev
        decoder = Layers(tf.expand_dims(tf.expand_dims(input_sample, 1), 1))
        decoder.deconv2d(3, 128, padding='VALID')
        decoder.deconv2d(3, 64, padding='VALID', stride=2)
        decoder.deconv2d(3, 64, stride=2)
        decoder.deconv2d(5, 32, stride=2)
        decoder.deconv2d(7, 1, activation_fn=tf.nn.tanh, s_value=None)
        return decoder.get_output(), mean, stddev, class_predictions, logits
unsupervised.py (project: TensorFlow-VAE, author: dancsalo)
def _decoder(self, z):
        """ Define p(x|z) network"""
        if z is None:
            mean = None
            stddev = None
            input_sample = self.epsilon
        else:
            z = tf.reshape(z, [-1, self.flags['hidden_size'] * 2])
            print(z.get_shape())
            mean, stddev = tf.split(1, 2, z)
            stddev = tf.sqrt(tf.exp(stddev))
            input_sample = mean + self.epsilon * stddev
        decoder = Layers(tf.expand_dims(tf.expand_dims(input_sample, 1), 1))
        decoder.deconv2d(3, 128, padding='VALID')
        decoder.deconv2d(3, 128, padding='VALID', stride=2)
        decoder.deconv2d(3, 64, stride=2)
        decoder.deconv2d(3, 64, stride=2)
        decoder.deconv2d(5, 1, activation_fn=tf.nn.tanh, s_value=None)
        return decoder.get_output(), mean, stddev
ln_lstm.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):
            c, h = state

            # change bias argument to False since LN will add bias via shift
            concat = tf.nn.rnn_cell._linear(
                [inputs, h], 4 * self._num_units, False)

            i, j, f, o = tf.split(1, 4, concat)

            # add layer normalization to each gate
            i = ln(i, scope='i/')
            j = ln(j, scope='j/')
            f = ln(f, scope='f/')
            o = ln(o, scope='o/')

            new_c = (c * tf.nn.sigmoid(f + self._forget_bias) +
                     tf.nn.sigmoid(i) * self._activation(j))

            # add layer_normalization in calculation of new hidden state
            new_h = self._activation(
                ln(new_c, scope='new_h/')) * tf.nn.sigmoid(o)
            new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)
            return new_h, new_state
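A note on the snippet above: tf.nn.rnn_cell._linear is a private helper that was removed in later TensorFlow 1.x releases. A minimal sketch of an equivalent bias-free projection via a public API; shapes are hypothetical, and the layer-norm helper ln is assumed to come from the same project.

import tensorflow as tf

num_units = 32
inputs = tf.zeros([4, 16])
h = tf.zeros([4, num_units])
# Equivalent of tf.nn.rnn_cell._linear([inputs, h], 4 * num_units, False):
concat = tf.layers.dense(tf.concat([inputs, h], axis=1), 4 * num_units, use_bias=False)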
image_reader.py (project: tensorflow-deeplab-lfov, author: DrSleep)
def read_labeled_image_list(data_dir, data_list):
    """Reads txt file containing paths to images and ground truth masks.

    Args:
      data_dir: path to the directory with images and masks.
      data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.

    Returns:
      Two lists with all file names for images and masks, respectively.
    """
    f = open(data_list, 'r')
    images = []
    masks = []
    for line in f:
        image, mask = line.strip("\n").split(' ')
        images.append(data_dir + image)
        masks.append(data_dir + mask)
    return images, masks
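A minimal usage sketch with hypothetical paths; per the docstring, each line of the list file holds '/path/to/image /path/to/mask', and the function prefixes both paths with data_dir.

images, masks = read_labeled_image_list('/data/VOC2012', '/data/VOC2012/train_list.txt')
print(len(images), len(masks))   # two equally long lists of file paths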
vae.py (project: TensorFlow-ADGM, author: dancsalo)
def encoder(self, x):
        with tf.variable_scope('encoder'):
            net = resnet_utils.conv2d_same(x, 64, 7, stride=2, scope='conv1')
            net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
            x = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
            x_features_all, _ = resnet_v1.resnet_v1(x,
                                                self._blocks_encoder,
                                                global_pool=False,
                                                include_root_block=False,
                                                scope=self._resnet_scope)
            x_features_all = tf.reduce_mean(x_features_all, axis=[1, 2])
            x_features_labeled, x_features_unlabeled = tf.split(x_features_all, 2)

        x_features_tiled = tf.tile(x_features_unlabeled, [self._num_classes, 1])  # (100, 256) --> (2100, 256)
        x_features = tf.concat([x_features_labeled, x_features_tiled], 0) # (2100, 256) --> (2200, 256)
        return x_features
adgm.py (project: TensorFlow-ADGM, author: dancsalo)
def encoder(self, x):
        with tf.variable_scope('encoder'):
            net = resnet_utils.conv2d_same(x, 64, 7, stride=2, scope='conv1')
            net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
            x = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
            x_features_all, _ = resnet_v1.resnet_v1(x,
                                                self._blocks_encoder,
                                                global_pool=False,
                                                include_root_block=False,
                                                scope=self._resnet_scope)
            x_features_all = tf.reduce_mean(x_features_all, axis=[1, 2])
            x_features_labeled, x_features_unlabeled = tf.split(x_features_all, 2)

        x_features_tiled = tf.tile(x_features_unlabeled, [self._num_classes, 1])  # (100, 256) --> (2100, 256)
        x_features = tf.concat([x_features_labeled, x_features_tiled], 0) # (2100, 256) --> (2200, 256)
        return x_features

