Example source code using the Python class tf.Dimension()
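All of the snippets below target the TensorFlow 1.x shape API, in which tf.Dimension represents one dimension of a tf.TensorShape and tf.Dimension(None) stands for a statically unknown size. A minimal sketch of that API (assuming TF 1.x; tf.Dimension was removed from the public API in TF 2, where shapes hold plain ints and None):

import tensorflow as tf

d = tf.Dimension(32)          # a known dimension
u = tf.Dimension(None)        # an unknown dimension
print(d.value, u.value)       # 32 None
print((d * 4).value)          # Dimension arithmetic: 128

# Dimensions are the entries of a tf.TensorShape, e.g. for Tensor.set_shape():
shape = tf.TensorShape([tf.Dimension(None), tf.Dimension(None), tf.Dimension(32)])
print(shape)                  # (?, ?, 32)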

nn.py (project: Parser-v1, author: tdozat)
def conditional_bilinear_classifier(self, inputs1, inputs2, n_classes, probs, add_bias1=True, add_bias2=True):
    """"""

    input_shape = tf.shape(inputs1)
    batch_size = input_shape[0]
    bucket_size = input_shape[1]
    input_size = inputs1.get_shape().as_list()[-1]
    input_shape_to_set = [tf.Dimension(None), tf.Dimension(None), input_size+1]
    output_shape = tf.pack([batch_size, bucket_size, n_classes, bucket_size])
    if len(probs.get_shape().as_list()) == 2:
      probs = tf.to_float(tf.one_hot(tf.to_int64(probs), bucket_size, 1, 0))
    else:
      probs = tf.stop_gradient(probs)

    if self.moving_params is None:
      keep_prob = self.mlp_keep_prob
    else:
      keep_prob = 1
    if isinstance(keep_prob, tf.Tensor) or keep_prob < 1:
      noise_shape = tf.pack([batch_size, 1, input_size])
      inputs1 = tf.nn.dropout(inputs1, keep_prob, noise_shape=noise_shape)
      inputs2 = tf.nn.dropout(inputs2, keep_prob, noise_shape=noise_shape)

    inputs1 = tf.concat(2, [inputs1, tf.ones(tf.pack([batch_size, bucket_size, 1]))])
    inputs1.set_shape(input_shape_to_set)
    inputs2 = tf.concat(2, [inputs2, tf.ones(tf.pack([batch_size, bucket_size, 1]))])
    inputs2.set_shape(input_shape_to_set)

    bilin = linalg.bilinear(inputs1, inputs2,
                     n_classes,
                     add_bias1=add_bias1,
                     add_bias2=add_bias2,
                     initializer=tf.zeros_initializer,
                     moving_params=self.moving_params)
    weighted_bilin = tf.batch_matmul(bilin, tf.expand_dims(probs, 3))

    return weighted_bilin, bilin

  #=============================================================
dataset.py (project: THUMT, author: thumt)
def get_inference_input(inputs, params):
    dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(inputs)
    )

    # Split string
    dataset = dataset.map(lambda x: tf.string_split([x]).values,
                          num_parallel_calls=params.num_threads)

    # Append <eos>
    dataset = dataset.map(
        lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),
        num_parallel_calls=params.num_threads
    )

    # Convert tuple to dictionary
    dataset = dataset.map(
        lambda x: {"source": x, "source_length": tf.shape(x)[0]},
        num_parallel_calls=params.num_threads
    )

    dataset = dataset.padded_batch(
        params.decode_batch_size,
        {"source": [tf.Dimension(None)], "source_length": []},
        {"source": params.pad, "source_length": 0}
    )

    iterator = dataset.make_one_shot_iterator()
    features = iterator.get_next()

    src_table = tf.contrib.lookup.index_table_from_tensor(
        tf.constant(params.vocabulary["source"]),
        default_value=params.mapping["source"][params.unk]
    )
    features["source"] = src_table.lookup(features["source"])

    return features
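
A hypothetical usage sketch for the function above (the fields of params are assumptions inferred from how the function reads them, not from THUMT's actual configuration). The lookup table built by index_table_from_tensor must be initialized before the features can be evaluated:

# Hypothetical usage; params fields are assumptions inferred from the code above.
features = get_inference_input(["a b c", "d e"], params)
with tf.Session() as sess:
    sess.run(tf.tables_initializer())  # initializes the vocabulary lookup table
    batch = sess.run(features)         # padded "source" ids plus "source_length"
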
base.py (project: TensorBase, author: dancsalo)
def fc(self, output_nodes, keep_prob=1, activation_fn=tf.nn.relu, b_value=0.0, s_value=1.0, bn=True,
           trainable=True):
        """
        Fully Connected Layer
        :param output_nodes: int
        :param keep_prob: int. set to 1 for no dropout
        :param activation_fn: tf.nn function
        :param b_value: float or None
        :param s_value: float or None
        :param bn: bool
        """
        self.count['fc'] += 1
        scope = 'fc_' + str(self.count['fc'])
        with tf.variable_scope(scope):

            # Flatten if necessary
            if len(self.input.get_shape()) == 4:
                input_nodes = tf.Dimension(
                    self.input.get_shape()[1] * self.input.get_shape()[2] * self.input.get_shape()[3])
                output_shape = tf.stack([-1, input_nodes])
                self.input = tf.reshape(self.input, output_shape)

            # Matrix Multiplication Function
            input_nodes = self.input.get_shape()[1]
            output_shape = [input_nodes, output_nodes]
            w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
            self.input = tf.matmul(self.input, w)

            if bn is True:  # batch normalization
                self.input = self.batch_norm(self.input, 'fc')
            if b_value is not None:  # bias value
                b = self.const_variable(name='bias', shape=[output_nodes], value=b_value, trainable=trainable)
                self.input = tf.add(self.input, b)
            if s_value is not None:  # scale value
                s = self.const_variable(name='scale', shape=[output_nodes], value=s_value, trainable=trainable)
                self.input = tf.multiply(self.input, s)
            if activation_fn is not None:  # activation function
                self.input = activation_fn(self.input)
            if keep_prob != 1:  # dropout function
                self.input = tf.nn.dropout(self.input, keep_prob=keep_prob)
        print(scope + ' output: ' + str(self.input.get_shape()))
layers.py (project: fast-wavenet, author: tomlepaine)
def dilated_conv1d(inputs,
                   out_channels,
                   filter_width=2,
                   rate=1,
                   padding='VALID',
                   name=None,
                   gain=np.sqrt(2),
                   activation=tf.nn.relu):
    '''Dilated 1-D convolution, implemented via time_to_batch/batch_to_time.

    Args:
      inputs: (tensor) of shape [batch, width, channels]
      out_channels: number of output channels
      filter_width: width of the convolution filter
      rate: dilation rate
      padding: 'VALID' or 'SAME'
      name: variable scope name (required)
      gain: initializer gain passed to conv1d
      activation: activation function

    Outputs:
      outputs: (tensor)
    '''
    assert name
    with tf.variable_scope(name):
        _, width, _ = inputs.get_shape().as_list()
        inputs_ = time_to_batch(inputs, rate=rate)
        outputs_ = conv1d(inputs_,
                          out_channels=out_channels,
                          filter_width=filter_width,
                          padding=padding,
                          gain=gain,
                          activation=activation)
        _, conv_out_width, _ = outputs_.get_shape().as_list()
        new_width = conv_out_width * rate
        diff = new_width - width
        outputs = batch_to_time(outputs_, rate=rate, crop_left=diff)

        # Add additional shape information.
        tensor_shape = [tf.Dimension(None),
                        tf.Dimension(width),
                        tf.Dimension(out_channels)]
        outputs.set_shape(tf.TensorShape(tensor_shape))

    return outputs
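
The closing set_shape call matters because time_to_batch and batch_to_time discard static shape information; without it, downstream layers would see a fully unknown shape. A hypothetical call (assuming the fast-wavenet helpers time_to_batch, batch_to_time and conv1d are in scope):

x = tf.placeholder(tf.float32, [None, 1024, 32])  # [batch, width, channels]
y = dilated_conv1d(x, out_channels=64, rate=4, name='dilated_1')
print(y.get_shape())  # (?, 1024, 64), as pinned by the set_shape call above
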
custom_ops.py (project: tensorflow-input-pipelines, author: ischlag)
def log_number_of_params():
  total_parameters = 0
  for variable in tf.trainable_variables():
    # shape is an array of tf.Dimension
    shape = variable.get_shape()
    #tf.logging.info('Shape: %s', shape)
    #tf.logging.info('shape length: %s', len(shape))
    variable_parameters = 1
    for dim in shape:
      #tf.logging.info('dim: %s', dim)
      variable_parameters *= dim.value
    #tf.logging.info('variable params: %s', variable_parameters)
    total_parameters += variable_parameters
  tf.logging.info('Total number of parameters: %s', total_parameters)
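
One caveat: dim.value is None for a statically unknown dimension, so the multiplication above raises a TypeError on partially defined shapes. Trainable variables normally have fully defined shapes, but a defensive variant (a sketch, not part of the original snippet) could look like this:

def count_parameters_safe():
  total = 0
  for variable in tf.trainable_variables():
    count = 1
    for dim in variable.get_shape():  # each entry is a tf.Dimension
      if dim.value is None:           # statically unknown size
        raise ValueError('unknown dimension in %s' % variable.name)
      count *= dim.value
    total += count
  return total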
dataset.py (project: XMUNMT, author: XMUNLP)
def get_evaluation_input(inputs, params):
    with tf.device("/cpu:0"):
        # Create datasets
        datasets = []

        for data in inputs:
            dataset = tf.data.Dataset.from_tensor_slices(data)
            # Split string
            dataset = dataset.map(lambda x: tf.string_split([x]).values,
                                  num_parallel_calls=params.num_threads)
            # Append <eos>
            dataset = dataset.map(
                lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),
                num_parallel_calls=params.num_threads
            )
            datasets.append(dataset)

        dataset = tf.data.Dataset.zip(tuple(datasets))

        # Convert tuple to dictionary
        dataset = dataset.map(
            lambda *x: {
                "source": x[0],
                "source_length": tf.shape(x[0])[0],
                "references": x[1:]
            },
            num_parallel_calls=params.num_threads
        )

        dataset = dataset.padded_batch(
            params.eval_batch_size,
            {
                "source": [tf.Dimension(None)],
                "source_length": [],
                "references": (tf.Dimension(None),) * (len(inputs) - 1)
            },
            {
                "source": params.pad,
                "source_length": 0,
                "references": (params.pad,) * (len(inputs) - 1)
            }
        )

        iterator = dataset.make_one_shot_iterator()
        features = iterator.get_next()

        src_table = tf.contrib.lookup.index_table_from_tensor(
            tf.constant(params.vocabulary["source"]),
            default_value=params.mapping["source"][params.unk]
        )
        tgt_table = tf.contrib.lookup.index_table_from_tensor(
            tf.constant(params.vocabulary["target"]),
            default_value=params.mapping["target"][params.unk]
        )
        features["source"] = src_table.lookup(features["source"])
        features["references"] = tuple(
            tgt_table.lookup(item) for item in features["references"]
        )

    return features
core.py (project: tensorlight, author: bsautermeister)
def show_trainable_parameters(verbose=False):
    """Shows the number of trainable parameters in this graph.
    Parameters
    ----------
    verbose: Boolean, optional
        Show additional information and list the number of trainable
        variables per variable, not just the total sum.
    """
    total_width = 80
    trainable_vars = tf.trainable_variables()

    if len(trainable_vars) == 0:
        print("No model-params found.")
        return

    if verbose:
        print("-" * total_width)

    total_parameters = 0
    groups = {}
    for var in trainable_vars:
        # shape is an array of tf.Dimension
        shape = var.get_shape()
        var_params = 1
        for dim in shape:
            var_params *= dim.value
        if verbose:
            print("{:69} | {:8d}".format(var.name, var_params))

        total_parameters += var_params

        group_name = var.name.split('/')[0]
        if group_name in groups:
            groups[group_name] += var_params
        else:
            groups.update({group_name: var_params})

    print("-" * total_width)
    for group, count in groups.iteritems():
        print("{:69} | {:8d}".format(group, count))
    print("=" * total_width)
    print("{:69} | {:8d}".format("TOTAL", total_parameters))
    print("-" * total_width)
nn.py (project: Sing_Par, author: wanghm92)
def MLP(self, inputs, n_splits=1):
    """"""

    n_dims = len(inputs.get_shape().as_list())
    batch_size = tf.shape(inputs)[0]
    bucket_size = tf.shape(inputs)[1]
    input_size = inputs.get_shape().as_list()[-1]
    output_size = self.mlp_size
    output_shape = tf.pack([batch_size] + [bucket_size]*(n_dims-2) + [output_size])
    shape_to_set = [tf.Dimension(None)]*(n_dims-1) + [tf.Dimension(output_size)]

    if self.moving_params is None:
      if self.drop_gradually:
        s = self.global_sigmoid
        keep_prob = s + (1-s)*self.mlp_keep_prob
      else:
        keep_prob = self.mlp_keep_prob
    else:
      keep_prob = 1
    if isinstance(keep_prob, tf.Tensor) or keep_prob < 1:
      noise_shape = tf.pack([batch_size] + [1]*(n_dims-2) + [input_size])
      inputs = tf.nn.dropout(inputs, keep_prob, noise_shape=noise_shape)

    linear = linalg.linear(inputs,
                        output_size,
                        n_splits=n_splits,
                        add_bias=True,
                        moving_params=self.moving_params)
    if n_splits == 1:
      linear = [linear]
    for i, split in enumerate(linear):
      split = self.mlp_func(split)
      split.set_shape(shape_to_set)
      linear[i] = split
    if self.moving_params is None:
      with tf.variable_scope('Linear', reuse=True):
        matrix = tf.get_variable('Weights')
        I = tf.diag(tf.ones([self.mlp_size]))
        for W in tf.split(1, n_splits, matrix):
          WTWmI = tf.matmul(W, W, transpose_a=True) - I
          tf.add_to_collection('ortho_losses', tf.nn.l2_loss(WTWmI))
      for split in linear:
        tf.add_to_collection('covar_losses', self.covar_loss(split))
    if n_splits == 1:
      return linear[0]
    else:
      return linear

  #=============================================================
nn.py (project: Sing_Par, author: wanghm92)
def double_MLP(self, inputs, n_splits=1):
    """"""

    batch_size = tf.shape(inputs)[0]
    bucket_size = tf.shape(inputs)[1]
    input_size = inputs.get_shape().as_list()[-1]
    output_size = self.mlp_size
    output_shape = tf.pack([batch_size, bucket_size, bucket_size, output_size])
    shape_to_set = [tf.Dimension(None), tf.Dimension(None), tf.Dimension(None), tf.Dimension(output_size)]

    if self.moving_params is None:
      if self.drop_gradually:
        s = self.global_sigmoid
        keep_prob = s + (1-s)*self.mlp_keep_prob
      else:
        keep_prob = self.mlp_keep_prob
    else:
      keep_prob = 1
    if isinstance(keep_prob, tf.Tensor) or keep_prob < 1:
      noise_shape = tf.pack([batch_size, 1, input_size])
      inputs = tf.nn.dropout(inputs, keep_prob, noise_shape=noise_shape)

    lin1, lin2 = linalg.linear(inputs,
                               output_size*n_splits,
                               n_splits=2,
                               add_bias=True,
                               moving_params=self.moving_params)
    lin1 = tf.reshape(tf.transpose(lin1, [0, 2, 1]), tf.pack([-1, bucket_size, 1]))
    lin2 = tf.reshape(tf.transpose(lin2, [0, 2, 1]), tf.pack([-1, 1, bucket_size]))
    lin = lin1 + lin2
    lin = tf.reshape(lin, tf.pack([batch_size, n_splits*output_size, bucket_size, bucket_size]))
    lin = tf.transpose(lin, [0,2,3,1])
    top_mlps = tf.split(3, n_splits, self.mlp_func(lin))
    for top_mlp in top_mlps:
      top_mlp.set_shape(shape_to_set)
    if self.moving_params is None:
      with tf.variable_scope('Linear', reuse=True):
        matrix = tf.get_variable('Weights')
        I = tf.diag(tf.ones([self.mlp_size]))
        for W in tf.split(1, 2*n_splits, matrix):
          WTWmI = tf.matmul(W, W, transpose_a=True) - I
          tf.add_to_collection('ortho_losses', tf.nn.l2_loss(WTWmI))
      for split in top_mlps:
        tf.add_to_collection('covar_losses', self.covar_loss(split))
    if n_splits == 1:
      return top_mlps[0]
    else:
      return top_mlps

  #=============================================================
nn.py (project: Sing_Par, author: wanghm92)
def conditional_diagonal_bilinear_classifier(self, inputs1, inputs2, n_classes, probs, add_bias1=True, add_bias2=True):
    """"""

    input_shape = tf.shape(inputs1)
    batch_size = input_shape[0]
    bucket_size = input_shape[1]
    input_size = inputs1.get_shape().as_list()[-1]
    input_shape_to_set = [tf.Dimension(None), tf.Dimension(None), input_size+1]
    output_shape = tf.pack([batch_size, bucket_size, n_classes, bucket_size])
    if len(probs.get_shape().as_list()) == 2:
      probs = tf.to_float(tf.one_hot(tf.to_int64(probs), bucket_size, 1, 0))
    else:
      probs = tf.stop_gradient(probs)

    if self.moving_params is None:
      if self.drop_gradually:
        s = self.global_sigmoid
        keep_prob = s + (1-s)*self.mlp_keep_prob
      else:
        keep_prob = self.mlp_keep_prob
    else:
      keep_prob = 1
    if isinstance(keep_prob, tf.Tensor) or keep_prob < 1:
      noise_shape = tf.pack([batch_size, 1, input_size])
      inputs1 = tf.nn.dropout(inputs1, tf.sqrt(keep_prob), noise_shape=noise_shape)
      inputs2 = tf.nn.dropout(inputs2, tf.sqrt(keep_prob), noise_shape=noise_shape)

    inputs1 = tf.concat(2, [inputs1, tf.ones(tf.pack([batch_size, bucket_size, 1]))])
    inputs1.set_shape(input_shape_to_set)
    inputs2 = tf.concat(2, [inputs2, tf.ones(tf.pack([batch_size, bucket_size, 1]))])
    inputs2.set_shape(input_shape_to_set)

    bilin = linalg.diagonal_bilinear(inputs1, inputs2,
                                     n_classes,
                                     add_bias1=add_bias1,
                                     add_bias2=add_bias2,
                                     initializer=tf.zeros_initializer,
                                     moving_params=self.moving_params)
    weighted_bilin = tf.batch_matmul(bilin, tf.expand_dims(probs, 3))

    return weighted_bilin, bilin

  #=============================================================
nn.py (project: Sing_Par, author: wanghm92)
def conditional_bilinear_classifier(self, inputs1, inputs2, n_classes, probs, add_bias1=True, add_bias2=True):
    """"""

    input_shape = tf.shape(inputs1)
    batch_size = input_shape[0]
    bucket_size = input_shape[1]
    input_size = inputs1.get_shape().as_list()[-1]
    input_shape_to_set = [tf.Dimension(None), tf.Dimension(None), input_size+1]
    output_shape = tf.pack([batch_size, bucket_size, n_classes, bucket_size])
    if len(probs.get_shape().as_list()) == 2:
      probs = tf.to_float(tf.one_hot(tf.to_int64(probs), bucket_size, 1, 0))
    else:
      probs = tf.stop_gradient(probs)

    if self.moving_params is None:
      if self.drop_gradually:
        s = self.global_sigmoid
        keep_prob = s + (1-s)*self.mlp_keep_prob
      else:
        keep_prob = self.mlp_keep_prob
    else:
      keep_prob = 1
    if isinstance(keep_prob, tf.Tensor) or keep_prob < 1:
      noise_shape = tf.pack([batch_size, 1, input_size])
      inputs1 = tf.nn.dropout(inputs1, keep_prob, noise_shape=noise_shape)
      inputs2 = tf.nn.dropout(inputs2, keep_prob, noise_shape=noise_shape)

    inputs1 = tf.concat(2, [inputs1, tf.ones(tf.pack([batch_size, bucket_size, 1]))])
    inputs1.set_shape(input_shape_to_set)
    inputs2 = tf.concat(2, [inputs2, tf.ones(tf.pack([batch_size, bucket_size, 1]))])
    inputs2.set_shape(input_shape_to_set)

    bilin = linalg.bilinear(inputs1, inputs2,
                     n_classes,
                     add_bias1=add_bias1,
                     add_bias2=add_bias2,
                     initializer=tf.zeros_initializer,
                     moving_params=self.moving_params)
    weighted_bilin = tf.batch_matmul(bilin, tf.expand_dims(probs, 3))

    return weighted_bilin, bilin

  #=============================================================
linalg.py (project: Parser-v1, author: tdozat)
def linear(inputs, output_size, add_bias=True, n_splits=1, initializer=None, scope=None, moving_params=None):
  """"""

  if not isinstance(inputs, (list, tuple)):
    inputs = [inputs]
  output_size *= n_splits

  with tf.variable_scope(scope or 'Linear'):
    # Reformat the input
    total_input_size = 0
    shapes = [a.get_shape().as_list() for a in inputs]
    for shape in shapes:
      total_input_size += shape[-1]
    input_shape = tf.shape(inputs[0])
    output_shape = []
    for i in xrange(len(shapes[0])):
      output_shape.append(input_shape[i])
    output_shape[-1] = output_size
    output_shape = tf.pack(output_shape)
    for i, (input_, shape) in enumerate(zip(inputs, shapes)):
      inputs[i] = tf.reshape(input_, [-1, shape[-1]])
    concatenation = tf.concat(1, inputs)

    # Get the matrix
    if initializer is None and moving_params is None:
      mat = orthonormal_initializer(total_input_size, output_size//n_splits)
      mat = np.concatenate([mat]*n_splits, axis=1)
      initializer = tf.constant_initializer(mat)
    matrix = tf.get_variable('Weights', [total_input_size, output_size], initializer=initializer)
    if moving_params is not None:
      matrix = moving_params.average(matrix)
    else:
      tf.add_to_collection('Weights', matrix)

    # Get the bias
    if add_bias:
      bias = tf.get_variable('Biases', [output_size], initializer=tf.zeros_initializer)
      if moving_params is not None:
        bias = moving_params.average(bias)
    else:
      bias = 0

    # Do the multiplication
    new = tf.matmul(concatenation, matrix) + bias
    new = tf.reshape(new, output_shape)
    new.set_shape([tf.Dimension(None) for _ in xrange(len(shapes[0])-1)] + [tf.Dimension(output_size)])
    if n_splits > 1:
      return tf.split(len(new.get_shape().as_list())-1, n_splits, new)
    else:
      return new
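
A hypothetical call of linear (assuming the module's orthonormal_initializer is importable); the trailing set_shape is what pins the static output shape:

x = tf.placeholder(tf.float32, [None, None, 200])
h = linear(x, 400)                   # static shape (?, ?, 400)
h1, h2 = linear(x, 400, n_splits=2)  # two (?, ?, 400) tensors from one matmul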

#===============================================================
linalg.py (project: Parser-v1, author: tdozat)
def diagonal_bilinear(inputs1, inputs2, output_size, add_bias2=True, add_bias1=True, add_bias=False, initializer=None, scope=None, moving_params=None):
  """"""

  with tf.variable_scope(scope or 'Bilinear'):
    # Reformat the inputs
    ndims = len(inputs1.get_shape().as_list())
    inputs1_shape = tf.shape(inputs1)
    inputs2_shape = tf.shape(inputs2)
    inputs1_bucket_size = inputs1_shape[ndims-2]
    inputs2_bucket_size = inputs2_shape[ndims-2]

    inputs1_size = inputs1.get_shape().as_list()[-1]
    inputs2_size = inputs2.get_shape().as_list()[-1]
    assert inputs1_size == inputs2_size

    output_shape = []
    batch_size = 1
    for i in xrange(ndims-2):
      batch_size *= inputs1_shape[i]
      output_shape.append(inputs1_shape[i])
    output_shape.append(inputs1_bucket_size)
    output_shape.append(output_size)
    output_shape.append(inputs2_bucket_size)
    output_shape = tf.pack(output_shape)
    inputs1 = tf.reshape(inputs1, tf.pack([batch_size, inputs1_bucket_size, inputs1_size]))
    inputs2 = tf.reshape(inputs2, tf.pack([batch_size, inputs2_bucket_size, inputs2_size]))
    inputs1.set_shape([tf.Dimension(None)]*2 + [tf.Dimension(inputs1_size)])
    inputs2.set_shape([tf.Dimension(None)]*2 + [tf.Dimension(inputs2_size)])

    inputs = broadcast_mult(inputs1, inputs2)
    with tf.variable_scope('Bilinear'):
      bilin = linear(inputs, output_size, add_bias=add_bias, initializer=initializer, scope=scope, moving_params=moving_params)
    with tf.variable_scope('Linear1'):
      lin1 = linear(inputs1, output_size, add_bias=False, initializer=initializer, scope=scope, moving_params=moving_params)
      lin1 = tf.expand_dims(lin1, 2)
    with tf.variable_scope('Linear2'):
      lin2 = linear(inputs2, output_size, add_bias=False, initializer=initializer, scope=scope, moving_params=moving_params)
      lin2 = tf.expand_dims(lin2, 1)

    bilin = tf.transpose(bilin+lin1+lin2, [0,1,3,2])

    return bilin

#===============================================================
helpers.py (project: odin, author: imito)
def set_shape(tensor, shape):
  """ This function will filling the missing shape information
  of given tensor
  """
  if not is_tensor(tensor):
    raise ValueError('tensor must be instance of `Tensor`.')
  # ====== Test ====== #
  ndims = tensor.get_shape().ndims
  shape = as_tuple(shape)
  if ndims != len(shape):
    raise ValueError("The tensor has %d dimensions, but the given shape "
                     "has %d dimensions." % (ndims, len(shape)))
  # ====== DO it ====== #
  old_shape = tensor.get_shape()
  new_shape = []
  for old, new in zip(old_shape, shape):
    old_value = old.value
    if isinstance(new, tf.Dimension):
      new = new.value
    # matching old and new values
    if old_value is not None and new is not None:
      if old_value != new:
        raise ValueError("Known shape information mismatch, from tensorflow"
            ":%s, and given shape:%s." %
            (str(old_shape.as_list()), str(shape)))
      else:
        new_shape.append(old_value)
    elif old_value is None and new is not None:
      new_shape.append(new)
    elif old_value is not None and new is None:
      new_shape.append(old_value)
    else:
      # both the old and the new value are unknown
      new_shape.append(None)
  tensor.set_shape(new_shape)
  return tensor
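
A hypothetical use of this helper, filling in only the dimensions that TensorFlow could not infer (the placeholder shape is an assumption):

x = tf.placeholder(tf.float32, shape=[None, None, 128])
x = set_shape(x, (None, 50, 128))  # batch stays unknown, time becomes 50
print(x.get_shape())               # (?, 50, 128)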


# ===========================================================================
# VALUE MANIPULATION
# ===========================================================================
dataset.py (project: THUMT, author: thumt)
def get_evaluation_input(inputs, params):
    # Identical, line for line, to the XMUNMT get_evaluation_input shown above.

