Python examples: tf.placeholder_with_default()
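
All of the snippets below rely on the same behaviour: `tf.placeholder_with_default(input, shape)` builds a tensor that evaluates to `input` when nothing is fed, yet can still be overridden through `feed_dict` like an ordinary placeholder. A minimal standalone sketch of that behaviour (TF 1.x graph mode, not taken from any of the projects below):

import tensorflow as tf

# Evaluates to the default unless explicitly fed at run time.
is_training = tf.placeholder_with_default(False, shape=(), name="is_training")
keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")

with tf.Session() as sess:
    print(sess.run(is_training))                                 # False (default)
    print(sess.run(is_training, feed_dict={is_training: True}))  # True (overridden)
    print(sess.run(keep_prob, feed_dict={keep_prob: 0.5}))       # 0.5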

LeNetBN.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
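
A hedged usage sketch (not part of the repository) of how the returned switch is typically driven: evaluation runs with the False default, while a training step feeds True so that train-time ops are active. It assumes `model` is an instance of one of these model classes and `images` is an input batch tensor.

# Hypothetical usage of get(); `model` and `images` are assumed to exist.
is_training_, logits = model.get(images, num_classes=10, train_phase=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training step: override the default so train-time ops (e.g. BN updates) run.
    sess.run(logits, feed_dict={is_training_: True})
    # Evaluation: nothing is fed, so is_training_ falls back to its default (False).
    sess.run(logits)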
VGGDirectDropout.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
VGGBN.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
LeNet.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
LeNetDropout.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
VGGDropout.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
shaders.py (project: tf.rasterizer, author: vahidk)
def __init__(self):
        self.vertices = tf.placeholder(tf.float32, [None, 3])
        self.normals = tf.placeholder(tf.float32, [None, 3])
        self.uvs = tf.placeholder(tf.float32, [None, 2])
        self.texture = tf.placeholder(tf.float32, [None, None, 3])

        default_light_dir = np.array([-1, -1, -1], dtype=np.float32)
        default_ambient = np.array([0.5, 0.5, 0.5], dtype=np.float32)
        default_diffuse = np.array([1, 1, 1], dtype=np.float32)
        default_wvp = np.eye(4, dtype=np.float32)

        self.light_dir = tf.placeholder_with_default(default_light_dir, [3])
        self.ambient = tf.placeholder_with_default(default_ambient, [3])
        self.diffuse = tf.placeholder_with_default(default_diffuse, [3])
        self.wvp = tf.placeholder_with_default(default_wvp, [4, 4])

        self.packed_texture = utils.pack_colors(self.texture, 2, False)
        self.iwvp = tf.matrix_inverse(self.wvp)

        self.varying_uv = [None, None, None]
        self.varying_norm = [None, None, None]
embed_mixture.py (project: lda2vec-tf, author: meereeum)
def __init__(self, n_documents, n_topics, n_dim, temperature=1.0,
                 W_in=None, factors_in=None):
        self.n_documents = n_documents
        # self.n_topics = n_topics
        # self.n_dim = n_dim
        self.temperature = temperature
        self.dropout = tf.placeholder_with_default(1., shape=[], name="dropout")

        scalar = 1 / np.sqrt(n_documents + n_topics)

        self.W = (tf.Variable( # unnormalized embedding weights
            tf.random_normal([n_documents, n_topics], mean=0, stddev=50*scalar),
                name="doc_embeddings") if W_in is None else W_in)

        # factors = (tf.Variable( # topic vectors
        #       _orthogonal_matrix((n_topics, n_dim)).astype("float32") * scalar,
        #       name="topics") if factors_in is None else factors_in)

        # tf 0.12.0 only
        factors = (tf.get_variable("topics", shape=(n_topics, n_dim),
                                   dtype=tf.float32, initializer=
                                   tf.orthogonal_initializer(gain=scalar))
                   if factors_in is None else factors_in)
        self.factors = tf.nn.dropout(factors, self.dropout)
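
Here `dropout` is the keep probability and defaults to 1.0, so evaluation needs no feed; training would presumably pass a smaller value. A hedged sketch of that pattern (the class name `EmbedMixture` and the constructor arguments are assumptions, not confirmed by this excerpt):

# Hypothetical feeds for the keep-probability placeholder defined above.
mixture = EmbedMixture(n_documents=1000, n_topics=20, n_dim=128)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training: keep 80% of the topic-vector units.
    sess.run(mixture.factors, feed_dict={mixture.dropout: 0.8})
    # Evaluation: omit the feed and the default of 1.0 disables dropout.
    sess.run(mixture.factors)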
bawn.py (project: WaveNet-Enhancement, author: auspicious3000)
def data_initializer_prior(data_segments, data_labels):
    # Input data
    segments_initializer = tf.placeholder_with_default(
        tf.zeros(data_segments.shape, tf.int32),
        shape=data_segments.shape,
        name='segments_initializer')
    labels_initializer = tf.placeholder_with_default(
        tf.zeros(data_labels.shape, tf.int32),
        shape=data_labels.shape,
        name='labels_initializer')
    input_segments = tf.Variable(
          segments_initializer, trainable=False, 
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_segments')
    input_labels = tf.Variable(
          labels_initializer, trainable=False, 
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_labels')    

    return (segments_initializer, labels_initializer, input_segments, input_labels)
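
This is the standard idiom for preloading large arrays into variables without baking them into the graph definition: the placeholder_with_default supplies a zero default, and the real data is fed once when the variables are initialized. A hedged usage sketch (the initializer call and feed names simply mirror the handles returned above):

# Hypothetical one-time initialization with the real data arrays.
seg_init, lab_init, input_segments, input_labels = data_initializer_prior(
    data_segments, data_labels)

with tf.Session() as sess:
    sess.run(tf.variables_initializer([input_segments, input_labels]),
             feed_dict={seg_init: data_segments, lab_init: data_labels})
    # The variables now hold the data; later steps read them without any feeding.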
bawn.py (project: WaveNet-Enhancement, author: auspicious3000)
def data_initializer_simple(data_segments, data_labels):
    # Input data
    segments_initializer = tf.placeholder_with_default(
        tf.zeros(data_segments.shape, tf.float32),
        shape=data_segments.shape,
        name='segments_initializer')
    labels_initializer = tf.placeholder_with_default(
        tf.zeros(data_labels.shape, tf.int32),
        shape=data_labels.shape,
        name='labels_initializer')
    input_segments = tf.Variable(
          segments_initializer, trainable=False, 
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_segments')
    input_labels = tf.Variable(
          labels_initializer, trainable=False, 
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_labels')    

    return (segments_initializer, labels_initializer, input_segments, input_labels)
aggressive_multi_head_UNET_2d.py (project: lung-cancer-detector, author: YichenGong)
def create_inputs(self, image_size):
        print("Creating input Placeholders...")
        #input image
        self.X = tf.placeholder(dtype=tf.float32, 
            shape=[None, image_size[0], image_size[1], 1],
            name="in")

        #Outputs of the different heads

        #Nodule head
        self.Y_nodule = tf.placeholder(dtype=tf.float32,
            shape=[None, image_size[0], image_size[1], 1],
            name="out_nodule")
        self.Y_nodule_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="nodule_weight")

        #Cancer head
        self.Y_cancer = tf.placeholder(dtype=tf.float32,
            shape=[None, 1],
            name="out_cancer")
        self.Y_cancer_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="cancer_weight")

        #Boolean variables to check head and mode
        self.is_training = tf.placeholder(dtype=tf.bool,
            name="is_training")
        self.is_nodule = tf.placeholder(dtype=tf.bool,
            name="is_nodule")
        self.is_cancer = tf.placeholder(dtype=tf.bool,
            name="is_cancer")

        #Probability for dropout
        self.drop_prob = tf.placeholder_with_default(input=0.0,
            shape=None,
            name="dropout_probability")

        print("Created input placeholders!")
sample.py (project: CausalGAN, author: mkocaoglu)
def interpret_dict( a_dict, model,n_times=1, on_logits=True):
    '''
    pass either a do_dict or a cond_dict.
    The rules for converting arguments to numpy arrays to pass
    to tensorflow are identical
    '''
    if a_dict is None:
        return {}
    elif len(a_dict)==0:
        return {}

    if n_times>1:
        token=tf.placeholder_with_default(2.22, shape=())  # dummy scalar entry
        a_dict[token]=-2.22

    p_a_dict=take_product(a_dict)

    ##Need divisible batch_size for most models
    if len(p_a_dict)>0:
        L=len(p_a_dict.values()[0])
    else:
        L=0
    print("L is " + str(L))
    print(p_a_dict)

    ##Check compatability batch_size and L
    if L>=model.batch_size:
        if not L % model.batch_size == 0:
            raise ValueError('a_dict must be dividable by batch_size\
                             but instead product of inputs was of length',L)
    elif model.batch_size % L == 0:
        p_a_dict = {key:np.repeat(value,model.batch_size/L,axis=0) for key,value in p_a_dict.items()}
    else:
        raise ValueError('No. of intervened values must divide batch_size.')
    return p_a_dict
models.py (project: CausalGAN, author: mkocaoglu)
def __init__(self,N):
        with tf.variable_scope('Arrow') as scope:
            self.N=tf.placeholder_with_default(N,shape=[])
            #self.N=tf.constant(N) #how many to sample at a time
            self.e1=tf.random_uniform([self.N,1],0,1)
            self.e2=tf.random_uniform([self.N,1],0,1)
            self.e3=tf.random_uniform([self.N,1],0,1)
            self.build()
            #WARN. some of these are not trainable: i.e. poly
            self.var = tf.contrib.framework.get_variables(scope)
models.py (project: CausalGAN, author: mkocaoglu)
def __init__(self, N, hidden_size=10,z_dim=10):
        with tf.variable_scope('Gen') as scope:
            self.N=tf.placeholder_with_default(N,shape=[])
            self.hidden_size=hidden_size
            self.z_dim=z_dim
            self.build()
            self.tr_var = tf.contrib.framework.get_variables(scope)
            self.step=tf.Variable(0,name='step',trainable=False)
            self.var = tf.contrib.framework.get_variables(scope)
network.py (project: PSPNet-Keras-tensorflow, author: Vladkryvoruchko)
def __init__(self, inputs, trainable=True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup()
tensorport.py (project: jack, author: uclmr)
def create_placeholder(self):
        """Creates a TF placeholder_with_default.

        Convenience method that produces a constant of the type, value and shape defined by the port.
        Returns: a constant tensor of same type, shape and name. It can nevertheless be fed with external values
        as if it was a placeholder.
        """
        ph = tf.placeholder_with_default(self.default_value, self.shape, self.name)
        if ph.dtype != self.dtype:
            logger.warning(
                "Placeholder {} with default of type {} created for TensorPort with type {}!".format(self.name,
                                                                                                     ph.dtype,
                                                                                                     self.dtype))
        return ph
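
A minimal standalone sketch of the behaviour `create_placeholder` relies on, with an invented port-like default value and shape (TF 1.x; not quoted from the jack repository):

import numpy as np
import tensorflow as tf

# Hypothetical port default: behaves like a constant, but can still be fed.
default_value = np.zeros([1, 4], dtype=np.float32)
ph = tf.placeholder_with_default(default_value, shape=[None, 4], name="candidates")

with tf.Session() as sess:
    print(sess.run(ph))                                               # the default zeros
    print(sess.run(ph, feed_dict={ph: np.ones([2, 4], np.float32)}))  # fed values win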
model.py (project: DeepVideo, author: AniketBajpai)
def build_model(self, is_train=True):
        ''' Build model '''

        # Placeholders for data
        self.current_frames = tf.placeholder(
            name='current_frames', dtype=tf.float32,
            shape=[self.batch_size, self.num_frames, self.image_height, self.image_width, self.num_channels]
        )
        self.future_frames = tf.placeholder(
            name='future_frames', dtype=tf.float32,
            shape=[self.batch_size, self.num_frames, self.image_height, self.image_width, self.num_channels]
        )
        # self.label = tf.placeholder(
        #     name='label', dtype=tf.float32, shape=[self.batch_size, self.num_classes]
        # )

        self.is_train = tf.placeholder_with_default(bool(is_train), [], name='is_train')

        # Encoder
        self.E = Encoder('Encoder', self.configs_encoder)
        self.z = self.E(self.current_frames, is_debug=self.is_debug)

        # Generators
        self.Gr = Generator('Generator_R', self.configs_generator)
        self.Gf = Generator('Generator_F', self.configs_generator)

        self.generated_current_frames = self.Gr(self.z, is_debug=self.is_debug)
        self.generated_future_frames = self.Gf(self.z, is_debug=self.is_debug)

        # Discriminators
        self.D = Discriminator('Discriminator', self.configs_discriminator)

        self.D_real_current, self.D_real_current_logits = self.D(self.current_frames, is_debug=self.is_debug)
        self.D_fake_current, self.D_fake_current_logits = self.D(self.generated_current_frames, is_debug=self.is_debug)
        self.D_real_future, self.D_real_future_logits = self.D(self.future_frames, is_debug=self.is_debug)
        self.D_fake_future, self.D_fake_future_logits = self.D(self.generated_future_frames, is_debug=self.is_debug)

        print_message('Successfully loaded the model')
VGG.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
baseop.py (project: tensorflow-yolo, author: hjimce)
def wrap_pholder(self, ph, feed):
        """wrap layer.h into placeholders"""
        phtype = type(self.lay.h[ph])
        if phtype is not dict: return

        sig = '{}/{}'.format(self.scope, ph)
        val = self.lay.h[ph]

        self.lay.h[ph] = tf.placeholder_with_default(
            val['dfault'], val['shape'], name = sig)
        feed[self.lay.h[ph]] = val['feed']
network.py (project: tensorflow_homographynet, author: linjian93)
def __init__(self, inputs, trainable=True, is_training=False):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup(is_training)
test_layers.py (project: aboleth, author: data61)
def test_input_sample(make_data):
    """Test the input and tiling layer."""
    x, _, X = make_data
    n_samples = tf.placeholder_with_default(3, [])
    s = ab.InputLayer(name='myname', n_samples=n_samples)

    F, KL = s(myname=x)
    tc = tf.test.TestCase()
    with tc.test_session():
        f = F.eval()
        X_array = X.eval()
        assert KL == 0.0
        assert np.array_equal(f, X_array)
        for i in range(3):
            assert np.array_equal(f[i], x)
char_rnn_model.py (project: tensorflow-char-rnn, author: crazydonkey200)
def create_tuple_placeholders_with_default(inputs, extra_dims, shape):
  if isinstance(shape, int):
    result = tf.placeholder_with_default(
      inputs, list(extra_dims) + [shape])
  else:
    subplaceholders = [create_tuple_placeholders_with_default(
      subinputs, extra_dims, subshape)
                       for subinputs, subshape in zip(inputs, shape)]
    t = type(shape)
    if t == tuple:
      result = t(subplaceholders)
    else:
      result = t(*subplaceholders)    
  return result
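
The helper mirrors an arbitrarily nested tuple of tensors, e.g. an RNN initial state, as placeholders with defaults. A hedged usage sketch, assuming a MultiRNNCell whose zero state supplies the default values and whose `state_size` supplies the nested shape (this mirrors the likely call site but is not quoted from the repository):

# Hypothetical usage with a stacked LSTM zero state (TF 1.x contrib RNN API).
cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.BasicLSTMCell(128) for _ in range(2)])
zero_state = cell.zero_state(batch_size=32, dtype=tf.float32)

# Each leaf tensor becomes placeholder_with_default(leaf, [None, 128]), so a
# caller can feed a saved state or silently fall back to zeros.
initial_state = create_tuple_placeholders_with_default(
    zero_state, extra_dims=(None,), shape=cell.state_size)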
tf_model.py (project: char-rnn-text-generation, author: yxtay)
def build_train_graph(loss, learning_rate=0.001, clip_norm=5.0):
    """
    builds training graph
    """
    train_args = {"learning_rate": learning_rate, "clip_norm": clip_norm}
    logger.debug("building training graph: %s.", train_args)

    learning_rate = tf.placeholder_with_default(learning_rate, [], "learning_rate")
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = layers.optimize_loss(loss, global_step, learning_rate, "Adam",
                                    clip_gradients=clip_norm)

    model = {"global_step": global_step, "train_op": train_op,
             "learning_rate": learning_rate, "train_args": train_args}
    return model
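
Because `learning_rate` is a placeholder_with_default, training can run at the configured rate without any feed and still lower it on demand. A hedged sketch of that loop (session handling and the step values are illustrative, not part of the repository):

# Hypothetical training steps over the handles returned by build_train_graph.
model = build_train_graph(loss, learning_rate=0.001)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # (any inputs that `loss` depends on would be fed here as well)
    sess.run(model["train_op"])                                             # default 0.001
    sess.run(model["train_op"], feed_dict={model["learning_rate"]: 1e-4})   # lowered rate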
main.py (project: language-translation-english-to-french, author: Satyaki0924)
def main(self):
        train_graph = tf.Graph()
        save_path = self.path + '/checkpoints/dev'
        source_path = self.path + '/data/small_vocab_en'
        target_path = self.path + '/data/small_vocab_fr'
        PreProcess(source_path, target_path).process_and_save_data()
        _, batch_size, rnn_size, num_layers, encoding_embedding_size, decoding_embedding_size, _, _ = \
            Params().get()
        (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = \
            self.load_process()
        max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
        with train_graph.as_default():
            input_data, targets, lr, keep_prob = Inputs().get()
            sequence_length = tf.placeholder_with_default(
                max_source_sentence_length, None, name='sequence_length')
            input_shape = tf.shape(input_data)
            train_logits, inference_logits = Seq2seq().seq2seq_model(
                tf.reverse(input_data, [-1]), targets, keep_prob, batch_size,
                sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
                encoding_embedding_size, decoding_embedding_size,
                rnn_size, num_layers, target_vocab_to_int)
            tf.identity(inference_logits, 'logits')
            with tf.name_scope("optimization"):
                cost = tf.contrib.seq2seq.sequence_loss(train_logits, targets,
                                                        tf.ones([input_shape[0], sequence_length]))
                optimizer = tf.train.AdamOptimizer(lr)
                gradients = optimizer.compute_gradients(cost)
                capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                                    for grad, var in gradients if grad is not None]
                train_op = optimizer.apply_gradients(capped_gradients)
        Train(source_int_text, target_int_text, train_graph, train_op, cost,
              input_data, targets, lr, sequence_length, keep_prob, inference_logits, save_path).train()
model.py (project: text-gan-tensorflow, author: tokestermw)
def prepare_data(path, word2idx, num_threads=8, **opts):
    with tf.device("/cpu:0"):
        enqueue_data, dequeue_batch = get_input_queues(
            path, word2idx, batch_size=opts["batch_size"], num_threads=num_threads)
        # TODO: put this logic somewhere else
        input_ph = tf.placeholder_with_default(dequeue_batch, (None, None))
        source, target, sequence_length = preprocess(input_ph)
    return enqueue_data, input_ph, source, target, sequence_length
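
Note the pattern here: `input_ph` defaults to the dequeue op, so the normal pipeline needs no feeding, while generation or debugging code can push a hand-built token batch through exactly the same tensors. A hedged sketch of both paths (queue startup and the example batch are assumptions):

# Hypothetical use of the handles returned by prepare_data.
enqueue_data, input_ph, source, target, sequence_length = prepare_data(
    path, word2idx, batch_size=32)

with tf.Session() as sess:
    # Normal path: the default value is a dequeued batch from the input queue
    # (assumes the enqueue thread(s) have been started).
    sess.run([source, target])
    # Override path: feed a hand-built batch of token ids instead.
    sess.run([source, target], feed_dict={input_ph: [[1, 5, 9, 2]]})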
maml.py (project: maml, author: cbfinn)
def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):
        """ must call construct_model() after initializing MAML! """
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.update_lr = FLAGS.update_lr
        self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
        self.classification = False
        self.test_num_updates = test_num_updates
        if FLAGS.datasource == 'sinusoid':
            self.dim_hidden = [40, 40]
            self.loss_func = mse
            self.forward = self.forward_fc
            self.construct_weights = self.construct_fc_weights
        elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':
            self.loss_func = xent
            self.classification = True
            if FLAGS.conv:
                self.dim_hidden = FLAGS.num_filters
                self.forward = self.forward_conv
                self.construct_weights = self.construct_conv_weights
            else:
                self.dim_hidden = [256, 128, 64, 64]
                self.forward=self.forward_fc
                self.construct_weights = self.construct_fc_weights
            if FLAGS.datasource == 'miniimagenet':
                self.channels = 3
            else:
                self.channels = 1
            self.img_size = int(np.sqrt(self.dim_input/self.channels))
        else:
            raise ValueError('Unrecognized data source.')
gan.py (project: tf_serving_example, author: Vetal1977)
def __init__(self, input_real, z_size, learning_rate, num_classes=10,
                 alpha=0.2, beta1=0.5, drop_rate=.5):
        """
        Initializes the GAN model.

        :param input_real: Real data for the discriminator
        :param z_size: The number of entries in the noise vector.
        :param learning_rate: The learning rate to use for Adam optimizer.
        :param num_classes: The number of classes to recognize.
        :param alpha: The slope of the left half of the leaky ReLU activation
        :param beta1: The beta1 parameter for Adam.
        :param drop_rate: The probability of dropping a hidden unit (used in discriminator)
        """

        self.learning_rate = tf.Variable(learning_rate, trainable=False)
        self.input_real = input_real
        self.input_z = tf.placeholder(tf.float32, (None, z_size), name='input_z')
        self.y = tf.placeholder(tf.int32, (None), name='y')
        self.label_mask = tf.placeholder(tf.int32, (None), name='label_mask')
        self.drop_rate = tf.placeholder_with_default(drop_rate, (), "drop_rate")

        loss_results = self.model_loss(self.input_real, self.input_z,
                                       self.input_real.shape[3], self.y, num_classes,
                                       label_mask=self.label_mask,
                                       drop_rate=self.drop_rate,
                                       alpha=alpha)

        self.d_loss, self.g_loss, self.correct, \
            self.masked_correct, self.samples, self.pred_class, \
                self.discriminator_class_logits, self.discriminator_out = \
                    loss_results

        self.d_opt, self.g_opt, self.shrink_lr = self.model_opt(self.d_loss,
                                                                self.g_loss,
                                                                self.learning_rate, beta1)
network.py (project: tensorflow-deeplab-resnet, author: DrSleep)
def __init__(self, inputs, trainable=True, is_training=False, num_classes=21):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup(is_training, num_classes)
model.py (project: Activation-Visualization-Histogram, author: shaohua0116)
def __init__(self, config,
                 debug_information=False,
                 is_train=True):
        self.debug = debug_information

        self.config = config
        self.batch_size = self.config.batch_size
        self.input_height = self.config.data_info[0]
        self.input_width = self.config.data_info[1]
        self.num_class = self.config.data_info[2]
        self.c_dim = self.config.data_info[3]
        self.visualize_shape = self.config.visualize_shape
        self.conv_info = self.config.conv_info
        self.activation_fn = {
            'selu': selu,
            'relu': tf.nn.relu,
            'lrelu': lrelu,
        }[self.config.activation]

        # create placeholders for the input
        self.image = tf.placeholder(
            name='image', dtype=tf.float32,
            shape=[self.batch_size, self.input_height, self.input_width, self.c_dim],
        )
        self.label = tf.placeholder(
            name='label', dtype=tf.float32, shape=[self.batch_size, self.num_class],
        )

        self.is_training = tf.placeholder_with_default(bool(is_train), [], name='is_training')

        self.build(is_train=is_train)
network_base.py (project: tf-openpose, author: ildoonet)
def __init__(self, inputs, trainable=True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup()

