Python tuple() example source code
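Nearly all of the snippets on this page use `tf.tuple`, which groups a list of tensors so that none of them is returned until every tensor in the list, plus everything in `control_inputs`, has been computed. A minimal sketch of that behaviour, assuming a TensorFlow 1.x graph and session (the variable names here are purely illustrative):

import tensorflow as tf  # TensorFlow 1.x assumed

counter = tf.Variable(0, dtype=tf.int32)
increment = tf.assign_add(counter, 1)

a = tf.constant(1.0)
b = tf.constant(2.0)

# a_out and b_out only become available after `increment` has run.
a_out, b_out = tf.tuple([a, b], control_inputs=[increment])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([a_out, b_out]))  # [1.0, 2.0]
    print(sess.run(counter))         # 1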

lstm_util.py (project: tensorlm, author: batzner)
def get_state_reset_op(state_variables, cell, max_batch_size):
    """Returns an operation to set each variable in a list of LSTMStateTuples to zero.

    See get_state_variables() for more info.

    Args:
        state_variables (tuple[tf.contrib.rnn.LSTMStateTuple]): The LSTM's state variables.
        cell (tf.contrib.rnn.MultiRNNCell): A MultiRNNCell consisting of multiple LSTMCells.
        max_batch_size (int): The maximum size of batches that will be fed to the LSTMCell.

    Returns:
        tf.Operation: An operation that sets the LSTM's state to zero.
    """
    zero_states = cell.zero_state(max_batch_size, tf.float32)
    return get_state_update_op(state_variables, zero_states)
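`get_state_update_op()` is defined elsewhere in the tensorlm project and is not shown here. A plausible minimal sketch, assuming each entry of `state_variables` is an `LSTMStateTuple` of `tf.Variable`s matching the shapes of the corresponding new states (an illustration, not the project's actual code):

import tensorflow as tf  # TensorFlow 1.x assumed

def get_state_update_op(state_variables, new_states):
    """Returns an op that assigns the given states to the LSTM state variables (sketch)."""
    update_ops = []
    for state_variable, new_state in zip(state_variables, new_states):
        # Assign both the cell state (c) and the hidden state (h) of each layer.
        update_ops.extend([state_variable.c.assign(new_state.c),
                           state_variable.h.assign(new_state.h)])
    # Group the assignments into a single op that can be run in one session call.
    return tf.group(*update_ops)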
core.py (project: tensorlight, author: bsautermeister)
def _build_computation_graph(self, x, y, opt):
        """Builds the (device or runtime specific) computation graph.
        Parameters
        ----------
        x: n-D Tensor
            The input tensor.
        y: m-D Tensor
            The target tensor.
        opt: Optimizer
            The TensorFlow optimizer instance.
        Returns
        ----------
        A tuple of (grads, summaries, total_loss, loss, eval_dict)
        """
        pass
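`_build_computation_graph` is an abstract hook that tensorlight subclasses override. A hypothetical override, assuming a simple regression model, TensorFlow 1.x, and a statically known last dimension of `y` (the layer and loss used here are illustrative, not tensorlight's API):

import tensorflow as tf  # TensorFlow 1.x assumed

def _build_computation_graph(self, x, y, opt):
    # Flatten the targets and fit a simple linear model; real models go deeper.
    y_flat = tf.layers.flatten(y)
    predictions = tf.layers.dense(tf.layers.flatten(x),
                                  units=y_flat.get_shape().as_list()[-1])
    loss = tf.reduce_mean(tf.squared_difference(predictions, y_flat))
    total_loss = loss  # plus weight-decay terms, if any
    grads = opt.compute_gradients(total_loss)
    summaries = [tf.summary.scalar('loss', loss)]
    eval_dict = {'mse': loss}
    return grads, summaries, total_loss, loss, eval_dict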
core.py (project: tensorlight, author: bsautermeister)
def datasets(self):
        """Gets the datasets as a named tuple.
           Use the members ds.train, ds.valid or ds.test
           of the returned tuple."""
        return self._datasets
core.py (project: tensorlight, author: bsautermeister)
def placeholders(self):
        """Gets the placeholders as a named tuple."""
        return self._ph
core.py (project: tensorlight, author: bsautermeister)
def gpu(self):
        """Gets the gpu config as a named tuple."""
        return self._gpu
batch_norm.py (project: DDPG, author: MOCR)
def __init__(self, x, size, selectTrain, sess, toTarget=None, ts=0.001):

        self.sess = sess
        self.mean_x_train, self.variance_x_train = moments(x, [0])

        #self.mean_x_ma, self.variance_x_ma = moments(self.x_splh, [0])

        self.mean_x_ma = tf.Variable(tf.zeros([size]))
        self.variance_x_ma = tf.Variable(tf.ones([size]))


        self.update = tf.tuple(
            [self.variance_x_ma.assign(0.95*self.variance_x_ma + 0.05*self.variance_x_train)],
            control_inputs=[self.mean_x_ma.assign(0.95*self.mean_x_ma + 0.05*self.mean_x_train)])[0]
        self.mean_x_ma_update = tf.tuple([self.mean_x_train] , control_inputs=[])[0]
        self.printUp = tf.Print(self.mean_x_ma_update, [selectTrain], message="selectTrain value : ")
        self.variance_x_ma_update = tf.tuple([self.variance_x_train], control_inputs=[])[0]

        def getxmau(): return self.mean_x_ma_update
        def getxma(): return self.mean_x_ma    

        def getvxmau(): return self.variance_x_ma_update
        def getvxma(): return self.variance_x_ma

        self.mean_x = tf.cond(selectTrain, getxmau, getxma)
        self.variance_x = tf.cond(selectTrain, getvxmau, getvxma)

        self.beta = tf.Variable(tf.zeros([size]))
        self.gamma = tf.Variable(tf.ones([size]))

        #tfs.tfs.session.run(tf.initialize_variables([self.beta, self.gamma]))#, self.mean_x_ma, self.variance_x_ma]))
        self.xNorm = tf.reshape(tf.nn.batch_norm_with_global_normalization(tf.reshape(x, [-1, 1, 1, size]), self.mean_x, self.variance_x, self.beta, self.gamma, 0.01, True), [-1, size])

        if toTarget is not None:
            self.isTracking = toTarget
            self.updateBeta = self.beta.assign(self.beta*(1-ts)+self.isTracking.beta*ts)
            self.updateGamma = self.gamma.assign(self.gamma*(1-ts)+self.isTracking.gamma*ts)
            self.updateTarget = tf.group(self.updateBeta, self.updateGamma)
metrics.py (project: Deep-Fashion, author: TomPyonsuke)
def precision_recall(num_gbboxes, num_detections, tp, fp, scores,
                     dtype=tf.float64, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Input dictionaries: dict outputs as streaming metrics.
    if isinstance(scores, dict):
        d_precision = {}
        d_recall = {}
        for c in num_gbboxes.keys():
            scope = 'precision_recall_%s' % c
            p, r = precision_recall(num_gbboxes[c], num_detections[c],
                                    tp[c], fp[c], scores[c],
                                    dtype, scope)
            d_precision[c] = p
            d_recall[c] = r
        return d_precision, d_recall

    # Sort by score.
    with tf.name_scope(scope, 'precision_recall',
                       [num_gbboxes, num_detections, tp, fp, scores]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=num_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(num_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')
        return tf.tuple([precision, recall])
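`_safe_div` is a project-local helper that is not shown on this page. A hypothetical stand-in with the same call signature, returning zero wherever the denominator is zero:

import tensorflow as tf  # TensorFlow 1.x assumed

def _safe_div(numerator, denominator, name):
    """Element-wise division that yields 0 where the denominator is 0 (sketch)."""
    return tf.where(tf.greater(denominator, 0),
                    tf.divide(numerator, denominator),
                    tf.zeros_like(numerator),
                    name=name)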
metrics.py (project: Deep-Fashion, author: TomPyonsuke)
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      xvals: Python list of floats.
      precision: 1D Tensor, decreasing.
      recall: 1D Tensor, increasing.
    Returns:
      List of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)
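`tfe_math.cummax` is another helper from the same project and is not reproduced here. A hypothetical sketch of a reverse-capable cumulative maximum over axis 0, using only standard TF 1.x ops:

import tensorflow as tf  # TensorFlow 1.x assumed

def cummax(x, reverse=False, name='cummax'):
    """Cumulative maximum of a 1-D tensor along axis 0 (sketch)."""
    with tf.name_scope(name):
        if reverse:
            x = tf.reverse(x, axis=[0])
        # scan carries the running maximum; the first output equals x[0].
        out = tf.scan(tf.maximum, x)
        if reverse:
            out = tf.reverse(out, axis=[0])
        return out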


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
rev_block.py (project: tensor2tensor, author: tensorflow)
def _rev_layer_forward(xs, f, g, f_side_input, g_side_input,
                       gate_outputs=False):
  """Forward for 1 reversible layer."""
  x1, x2 = xs
  y1 = x1 + (f(x2, f_side_input) if f_side_input else f(x2))
  y2 = x2 + (g(y1, g_side_input) if g_side_input else g(y1))
  if gate_outputs:
    return tf.tuple([y1, y2])
  else:
    return (y1, y2)
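For context: the appeal of a reversible layer is that its inputs can be recomputed from its outputs during backprop, so intermediate activations need not be stored. A minimal sketch of the corresponding inverse mapping, following the same f/g conventions as the forward pass above (the real tensor2tensor backward pass also recomputes gradients):

def _rev_layer_backward(ys, f, g, f_side_input, g_side_input):
  """Reconstructs (x1, x2) from the outputs (y1, y2) of one reversible layer (sketch)."""
  y1, y2 = ys
  x2 = y2 - (g(y1, g_side_input) if g_side_input else g(y1))
  x1 = y1 - (f(x2, f_side_input) if f_side_input else f(x2))
  return (x1, x2)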
rev_block.py (project: tensor2tensor, author: tensorflow)
def _recompute_grad(fn, args):
  """See recompute_grad."""

  cached_vs = []
  cached_arg_scope = []

  def grad_fn(inputs, variables, outputs, output_grads):
    """Recompute outputs for gradient computation."""
    del outputs
    # Recompute outputs
    with tf.control_dependencies(output_grads):
      with tf.contrib.framework.arg_scope(cached_arg_scope[0]):
        with tf.variable_scope(cached_vs[0], reuse=True):
          outputs = fn(*inputs)

    if not isinstance(outputs, (list, tuple)):
      outputs = [outputs]
    outputs = list(outputs)
    grads = tf.gradients(outputs, inputs + variables, output_grads)
    grad_inputs = grads[:len(inputs)]
    grad_vars = grads[len(inputs):]
    return grad_inputs, grad_vars

  @common_layers.fn_with_custom_grad(grad_fn)
  def fn_with_recompute(*args):
    cached_vs.append(tf.get_variable_scope())
    # TODO(rsepassi): Rm conditional in TF 1.5
    if hasattr(tf.contrib.framework, "current_arg_scope"):
      cached_arg_scope.append(tf.contrib.framework.current_arg_scope())
    else:
      cached_arg_scope.append({})
    return fn(*args)

  return fn_with_recompute(*args)
metrics.py (project: seglink, author: dengdan)
def precision_recall(num_gbboxes, tp, fp, scope=None):
    """Compute precision and recall from true positives and false
    positives booleans arrays
    """

    with tf.name_scope(scope, 'precision_recall'):
        # Compute recall and precision.
        tp = tf.reduce_sum(tf.cast(tp, tf.float32), axis=0)
        fp = tf.reduce_sum(tf.cast(fp, tf.float32), axis=0)
        recall = tfe_math.safe_divide(tp, tf.cast(num_gbboxes, tf.float32), 'recall')
        precision = tfe_math.safe_divide(tp, tp + fp, 'precision')
        return tf.tuple([precision, recall])
Augment.py (project: RFCN-tensorflow, author: xdever)
def mirror(image, boxes):
    def doMirror(image, boxes):
        image = tf.reverse(image, axis=[2])
        x0,y0,x1,y1 = tf.unstack(boxes, axis=1)

        w=tf.cast(tf.shape(image)[2], tf.float32)
        x0_m=w-x1
        x1_m=w-x0

        return image, tf.stack([x0_m,y0,x1_m,y1], axis=1)

    uniform_random = tf.random_uniform([], 0, 1.0)
    return tf.cond(uniform_random < 0.5, lambda: tf.tuple([image, boxes]), lambda: doMirror(image, boxes))
MultiGather.py (project: RFCN-tensorflow, author: xdever)
def gatherTopK(t, k, others=[], sorted=False):
    res=[]
    with tf.name_scope("gather_top_k"):
        isMoreThanK = tf.shape(t)[-1]>k
        values, indices = tf.cond(isMoreThanK, lambda: tf.nn.top_k(t, k=k, sorted=sorted), lambda: tf.tuple([t, tf.zeros((0,1), tf.int32)]))
        indices = tf.reshape(indices, [-1,1])
        res.append(values)

        for o in others:
            res.append(tf.cond(isMoreThanK, lambda: tf.gather_nd(o, indices), lambda: o))

    return res
metrics.py (project: DAVIS-2016-Chanllege-Solution, author: tangyuhao)
def precision_recall(num_gbboxes, num_detections, tp, fp, scores,
                     dtype=tf.float64, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Input dictionaries: dict outputs as streaming metrics.
    if isinstance(scores, dict):
        d_precision = {}
        d_recall = {}
        for c in num_gbboxes.keys():
            scope = 'precision_recall_%s' % c
            p, r = precision_recall(num_gbboxes[c], num_detections[c],
                                    tp[c], fp[c], scores[c],
                                    dtype, scope)
            d_precision[c] = p
            d_recall[c] = r
        return d_precision, d_recall

    # Sort by score.
    with tf.name_scope(scope, 'precision_recall',
                       [num_gbboxes, num_detections, tp, fp, scores]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=num_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(num_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')
        return tf.tuple([precision, recall])
metrics.py (project: DAVIS-2016-Chanllege-Solution, author: tangyuhao)
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      xvals: Python list of floats.
      precision: 1D Tensor, decreasing.
      recall: 1D Tensor, increasing.
    Returns:
      List of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
inputs.py (project: tf_classification, author: visipedia)
def distorted_bounding_box_crop(image,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Generates cropped_image using a one of the bboxes randomly distorted.
  See `tf.image.sample_distorted_bounding_box` for more documentation.
  Args:
    image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged
      as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then the whole image is
      used.
    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
      area of the image must contain at least this fraction of any bounding box
      supplied.
    aspect_ratio_range: An optional list of `floats`. The cropped area of the
      image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
    max_attempts: An optional `int`. Number of attempts at generating a cropped
      region of the image of the specified constraints. After `max_attempts`
      failures, return the entire image.
    scope: Optional scope for name_scope.
  Returns:
    A tuple, a 3-D Tensor cropped_image and the distorted bbox
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
    # Each bounding box has shape [1, num_boxes, box coords] and
    # the coordinates are ordered [ymin, xmin, ymax, xmax].

    # A large fraction of image datasets contain a human-annotated bounding
    # box delineating the region of the image containing the object of interest.
    # We choose to create a new bounding box for the object which is a randomly
    # distorted version of the human-annotated bounding box that obeys an
    # allowed range of aspect ratios, sizes and overlap with the human-annotated
    # bounding box. If no box is supplied, then we assume the bounding box is
    # the entire image.
    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box

    # Crop the image to the specified bounding box.
    cropped_image = tf.slice(image, bbox_begin, bbox_size)
    return tf.tuple([cropped_image, distort_bbox])
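A short usage sketch, assuming TensorFlow 1.x and the function above; the single bounding box spans the whole image and is given in the documented [ymin, xmin, ymax, xmax] order:

import tensorflow as tf  # TensorFlow 1.x assumed

image = tf.random_uniform([240, 320, 3])       # dummy 3-D image
bbox = tf.constant([[[0.0, 0.0, 1.0, 1.0]]])   # shape [1, num_boxes=1, 4]
cropped_image, distort_bbox = distorted_bounding_box_crop(image, bbox)

with tf.Session() as sess:
    crop_val, bbox_val = sess.run([cropped_image, distort_bbox])
    print(crop_val.shape, bbox_val)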
roi.py (project: dataset, author: analysiscenter)
def non_max_suppression(inputs, scores, batch_size, max_output_size,
                        score_threshold=0.7, iou_threshold=0.7, nonempty=False, name='nms'):
    """ Perform NMS on batch of images.

    Parameters
    ----------
        inputs: tf.Tuple
            each component is a set of bboxes for the corresponding image
        scores: tf.Tuple
            scores of inputs
        batch_size:
            size of batch of inputs
        max_output_size:
            maximal number of bboxes per image
        score_threshold: float
            bboxes with a score less than score_threshold will be dropped
        iou_threshold: float
            bboxes with an IoU greater than iou_threshold will be merged
        nonempty: bool
            if True, at least one bbox per image will be returned
        name: str
            scope name

    Returns
    -------
        tf.Tuple
            indices of selected bboxes for each image

    """
    with tf.variable_scope(name):
        ix = tf.constant(0)
        filtered_rois = tf.TensorArray(dtype=tf.int32, size=batch_size, infer_shape=False)
        loop_cond = lambda ix, filtered_rois: tf.less(ix, batch_size)
        def _loop_body(ix, filtered_rois):
            indices, score, roi = _filter_tensor(scores[ix], score_threshold, inputs[ix]) # pylint: disable=unbalanced-tuple-unpacking
            roi_corners = tf.concat([roi[:, :2], roi[:, :2]+roi[:, 2:]], axis=-1)
            roi_after_nms = tf.image.non_max_suppression(roi_corners, score, max_output_size, iou_threshold)
            if nonempty:
                is_not_empty = lambda: filtered_rois.write(ix,
                                                           tf.cast(tf.gather(indices, roi_after_nms),
                                                                   dtype=tf.int32))
                is_empty = lambda: filtered_rois.write(ix, tf.constant([[0]]))
                filtered_rois = tf.cond(tf.not_equal(tf.shape(indices)[0], 0), is_not_empty, is_empty)
            else:
                filtered_rois = filtered_rois.write(ix, tf.cast(tf.gather(indices, roi_after_nms), dtype=tf.int32))
            return [ix+1, filtered_rois]
        _, res = tf.while_loop(loop_cond, _loop_body, [ix, filtered_rois])
        res = _array_to_tuple(res, batch_size, [-1, 1])
    return res
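`_filter_tensor` and `_array_to_tuple` are helpers from the same project and are not shown on this page. A hypothetical sketch of `_filter_tensor`, which appears to return the indices whose score exceeds the threshold together with the filtered scores and bboxes (illustrative only, not the project's actual code):

import tensorflow as tf  # TensorFlow 1.x assumed

def _filter_tensor(tensor, threshold, *others):
    """Keeps entries of `tensor` above `threshold`; gathers `others` at the same indices (sketch)."""
    indices = tf.reshape(tf.where(tensor > threshold), [-1])
    filtered = [tf.gather(t, indices) for t in (tensor,) + others]
    return [indices] + filtered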
faster_rcnn.py (project: dataset, author: analysiscenter)
def _rcn_head(self, inputs, image_shape, nms_threshold, rpn_thresholds,
                  rcn_batch, batch_size, name='rcn_head', **kwargs):
        anchors_labels = self.anchors_placeholders['labels']
        feature_maps, rpn_reg, rpn_cls = inputs
        n_anchors = self.n_anchors

        with tf.variable_scope(name):
            rcn_input_indices = non_max_suppression(rpn_reg, rpn_cls, batch_size, n_anchors,
                                                    iou_threshold=nms_threshold,
                                                    score_threshold=rpn_thresholds[1],
                                                    nonempty=True)

            rcn_input_indices = tf.cond(self.is_training,
                                        lambda: self.create_bbox_batch(rcn_input_indices, rcn_batch),
                                        lambda: rcn_input_indices)

            rcn_input_rois, rcn_input_labels = self._get_rois_and_labels(rpn_reg, anchors_labels, rcn_input_indices)

            for tensor in rcn_input_rois:
                tf.add_to_collection('roi', tensor)
            for tensor in rcn_input_labels:
                tf.add_to_collection('targets', tensor)
            roi_factor = np.array(self.map_shape/image_shape)

            rcn_input_rois = self.stop_gradient_tuple(rcn_input_rois)
            rcn_input_labels = self.stop_gradient_tuple(rcn_input_labels)

            roi_cropped = roi_pooling_layer(feature_maps, rcn_input_rois,
                                            factor=roi_factor, shape=(7, 7), data_format=kwargs['data_format'])
            indices, roi_cropped, rcn_input_labels = self._stack_tuple(roi_cropped, rcn_input_labels) # pylint: disable=unbalanced-tuple-unpacking
            rcn_clsf = conv_block(roi_cropped, 'f', units=10, name='output_conv', **kwargs)

            loss = self.rcn_loss(rcn_clsf, rcn_input_labels)

            rcn_clsf = tf.argmax(rcn_clsf, axis=-1)
            rcn_clsf = self._unstack_tuple(rcn_clsf, indices)
            rcn_clsf = tf.tuple(rcn_clsf, name='clsf')
            for tensor in rcn_clsf:
                tf.add_to_collection('rcn_output', tensor)
            loss = tf.identity(loss, 'loss')

        return rcn_clsf, loss
special_fn.py (project: tefla, author: openAGI)
def conv2d_v2(inputs, n_output_channels, is_training, reuse, **kwargs):
    """Adds a 2D dilated convolutional layer

        also known as convolution with holes or atrous convolution.
        If the rate parameter is equal to one, it performs regular 2-D convolution.
        If the rate parameter
        is greater than one, it performs convolution with holes, sampling the input
        values every rate pixels in the height and width dimensions.
        `convolutional layer` creates a variable called `weights`, representing a conv
        weight matrix, which is multiplied by the `x` to produce a
        `Tensor` of hidden units. If a `batch_norm` is provided (such as
        `batch_norm`), it is then applied. Otherwise, if `batch_norm` is
        None and a `b_init` and `use_bias` is provided then a `biases` variable would be
        created and added the hidden units. Finally, if `activation` is not `None`,
        it is applied to the hidden units as well.
        Note: that if `x` have a rank 4

    Args:
        x: A `Tensor` of rank 4 with a known value for the last dimension,
            i.e. `[batch_size, in_height, in_width, depth]`,
        is_training: Bool, training or testing
        n_output: Integer or long, the number of output units in the layer.
        reuse: whether or not the layer and its variables should be reused. To be
            able to reuse the layer scope must be given.
        filter_size: an int or a list/tuple of 2 positive integers specifying the spatial
            dimensions of the filters.
        dilation:  A positive int32. The stride with which we sample input values across
            the height and width dimensions. Equivalently, the rate by which we upsample the
            filter values by inserting zeros across the height and width dimensions. In the literature,
            the same parameter is sometimes called input stride/rate or dilation.
        padding: one of `"VALID"` or `"SAME"`. If padding is LEFT, the input is preprocessed to use VALID padding.
        activation: activation function, set to None to skip it and maintain
            a linear activation.
        batch_norm: normalization function to use. If
            `batch_norm` is `True` then google original implementation is used and
            if another function is provided then it is applied.
            default set to None for no normalizer function
        batch_norm_args: normalization function parameters.
        w_init: An initializer for the weights.
        w_regularizer: Optional regularizer for the weights.
        untie_biases: whether to untie biases across spatial dimensions
        b_init: An initializer for the biases. If None skip biases.
        outputs_collections: The collections to which the outputs are added.
        trainable: If `True` also add variables to the graph collection
            `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
        name: Optional name or scope for variable_scope/name_scope.
        use_bias: Whether to add bias or not

    Returns:
        The 4-D `Tensor` variable representing the result of the series of operations.
        e.g.: 4-D `Tensor` [batch, new_height, new_width, n_output].

    Raises:
        ValueError: if x has rank less than 4 or if its last dimension is not set.
    """
    if 'padding' in kwargs and kwargs['padding'] == 'LEFT':
        inputs, kwargs = format_input_left_padding(inputs, **kwargs)
    return dilated_conv2d(inputs, n_output_channels, is_training, reuse, **kwargs)
special_fn.py (project: tefla, author: openAGI)
def conv2d_gru(inputs, n_output_channels, is_training, reuse, filter_size=3, padding="SAME", dilation=1, name='conv2d_gru', outputs_collections=None, **kwargs):
    """Adds a convolutional GRU layer in 1 dimension

    Args:
        x: A `Tensor` of rank 4 with a known value for the last dimension,
            i.e. `[batch_size, in_height, in_width, depth]`,
        is_training: Bool, training or testing
        n_output: Integer or long, the number of output units in the layer.
        reuse: whether or not the layer and its variables should be reused. To be
            able to reuse the layer scope must be given.
        filter_size: an int or a list/tuple of 2 positive integers specifying the spatial
            dimensions of the filters.
        dilation:  A positive int32. The stride with which we sample input values across
            the height and width dimensions. Equivalently, the rate by which we upsample the
            filter values by inserting zeros across the height and width dimensions. In the literature,
            the same parameter is sometimes called input stride/rate or dilation.
        padding: one of `"VALID"` or `"SAME"`. If padding is LEFT, the input is preprocessed to use VALID padding.
        activation: activation function, set to None to skip it and maintain
            a linear activation.
        batch_norm: normalization function to use. If
            `batch_norm` is `True` then google original implementation is used and
            if another function is provided then it is applied.
            default set to None for no normalizer function
        batch_norm_args: normalization function parameters.
        w_init: An initializer for the weights.
        w_regularizer: Optional regularizer for the weights.
        untie_biases: whether to untie biases across spatial dimensions
        b_init: An initializer for the biases. If None skip biases.
        outputs_collections: The collections to which the outputs are added.
        trainable: If `True` also add variables to the graph collection
            `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
        name: Optional name or scope for variable_scope/name_scope.
        use_bias: Whether to add bias or not

    Returns:
        The 4-D `Tensor` variable representing the result of the series of operations.
        e.g.: 4-D `Tensor` [batch, new_height, new_width, n_output].

    Raises:
        ValueError: if x has rank less than 4 or if its last dimension is not set.
    """
    def conv2d_fn(x, name, bias_start, padding):
        return conv2d_v2(x, n_output_channels, is_training, reuse, filter_size=filter_size, padding=padding, b_init=bias_start, dilation=dilation, name=name, **kwargs)

    with tf.variable_scope(name, reuse=reuse):
        reset = saturating_sigmoid(conv2d_fn(inputs, "reset", 1.0, padding))
        gate = saturating_sigmoid(conv2d_fn(inputs, "gate", 1.0, padding))
        candidate = tf.tanh(
            conv2d_fn(reset * inputs, "candidate", 0.0, padding))
        outputs = gate * inputs + (1 - gate) * candidate
        return _collect_named_outputs(outputs_collections, name, outputs)
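`saturating_sigmoid` comes from elsewhere in tefla; the commonly used definition (also found in tensor2tensor) is a sigmoid that is linearly rescaled and clipped so it actually reaches 0 and 1. A sketch under that assumption:

import tensorflow as tf  # TensorFlow 1.x assumed

def saturating_sigmoid(x):
    """Sigmoid rescaled to saturate at exactly 0 and 1 (sketch)."""
    y = tf.sigmoid(x)
    return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))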

