Example source code for Python's get_default_graph()
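All of the snippets below use the TensorFlow 1.x graph-mode API, in which tf.get_default_graph() returns the graph that newly created operations are added to. As a minimal sketch of the call itself (the tensor name here is illustrative):

import tensorflow as tf

# Ops created at the top level land in the default graph, and every tensor
# in it can be looked up by its '<op_name>:<output_index>' name.
a = tf.constant(1.0, name='a')
graph = tf.get_default_graph()
assert a.graph is graph                     # tensors remember their graph
print(graph.get_tensor_by_name('a:0'))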

train.py (project: TFFRCNN, author: InterVideo)
def __init__(self, sess, network, imdb, roidb, output_dir, logdir, pretrained_model=None):
        """Initialize the SolverWrapper."""
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.output_dir = output_dir
        self.pretrained_model = pretrained_model

        if cfg.TRAIN.BBOX_REG:
            print('Computing bounding-box regression targets...')
            self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
            print('done')

        # For checkpoint
        self.saver = tf.train.Saver(max_to_keep=100)
        self.writer = tf.train.SummaryWriter(logdir=logdir,
                                             graph=tf.get_default_graph(),
                                             flush_secs=5)
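Note that tf.train.SummaryWriter was removed in TensorFlow 1.0 in favor of tf.summary.FileWriter, which takes the same arguments. A sketch of the equivalent call on TF >= 1.0 (the log directory is a placeholder):

import tensorflow as tf

writer = tf.summary.FileWriter(logdir='/tmp/logs',           # placeholder path
                               graph=tf.get_default_graph(),
                               flush_secs=5)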
seq2seq.py (project: nlvr_tau_nlp_final_proj, author: udiNaveh)
def get_feed_dicts_from_sentence(sentence, sentence_placeholder, sent_lengths_placeholder, sentence_words_bow,
                                 encoder_output_tensors, learn_embeddings=False):
    """
    Creates the values and feed dicts that depend on the sentence.
    These feed dicts are used to run the model or to compute gradients.
    """

    sentence_matrix = np.stack([one_hot_dict.get(w, one_hot_dict['<UNK>']) for w in sentence.split()])
    bow_words = np.reshape(np.sum([words_array == x for x in sentence.split()], axis=0), [1, len(words_vocabulary)])

    length = [len(sentence.split())]
    encoder_feed_dict = {sentence_placeholder: sentence_matrix, sent_lengths_placeholder: length,
                         sentence_words_bow: bow_words}
    sentence_encoder_outputs = sess.run(encoder_output_tensors, feed_dict=encoder_feed_dict)
    decoder_feed_dict = {encoder_output_tensors[i]: sentence_encoder_outputs[i]
                         for i in range(len(encoder_output_tensors))}

    if not learn_embeddings:
        W_we = tf.get_default_graph().get_tensor_by_name('W_we:0')
        encoder_feed_dict = union_dicts(encoder_feed_dict, {W_we: embeddings_matrix})
    return encoder_feed_dict, decoder_feed_dict
special_fn.py (project: tefla, author: openAGI)
def underlying_variable(t):
    """Find the underlying tf.Variable object.

    Args:
      t: a Tensor

    Returns:
      a tf.Variable object.
    """
    t = variable_ref(t)
    assert t is not None
    # make sure that the graph has a variable index and that it is up-to-date
    if not hasattr(tf.get_default_graph(), "var_index"):
        tf.get_default_graph().var_index = {}
    var_index = tf.get_default_graph().var_index
    for v in tf.global_variables()[len(var_index):]:
        var_index[v.name] = v
    return var_index[t.name]
gdn.py (project: tefla, author: openAGI)
def _lower_bound(inputs, bound, name=None):
    """Same as tf.maximum, but with helpful gradient for inputs < bound.
    The gradient is overridden so that it is passed through if the input is not
    hitting the bound. If it is, only gradients that push `inputs` higher than
    the bound are passed through. No gradients are passed through to the bound.
    Args:
      inputs: input tensor
      bound: lower bound for the input tensor
      name: name for this op
    Returns:
      tf.maximum(inputs, bound)
    """
    with tf.name_scope(name, 'GDNLowerBoundTefla', [inputs, bound]) as scope:
        inputs = tf.convert_to_tensor(inputs, name='inputs')
        bound = tf.convert_to_tensor(bound, name='bound')
        with tf.get_default_graph().gradient_override_map(
                {'Maximum': 'GDNLowerBoundTefla'}):
            return tf.maximum(inputs, bound, name=scope)
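gradient_override_map only remaps the Maximum op to whatever gradient function is registered under the name 'GDNLowerBoundTefla'; the project registers that function elsewhere. A sketch of what such a registration could look like, implementing the rule from the docstring (this is our assumption of that code's shape, not the project's own):

import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterGradient('GDNLowerBoundTefla')
def _lower_bound_grad(op, grad):
    # Pass the gradient through where the input is above the bound, or where
    # the gradient would push the input higher (grad < 0 under gradient
    # descent). The bound itself gets no gradient, hence the trailing None.
    inputs, bound = op.inputs[0], op.inputs[1]
    pass_through = tf.logical_or(inputs >= bound, grad < 0)
    return [tf.cast(pass_through, grad.dtype) * grad, None]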
utils_icarl.py (project: iCaRL, author: srebuffi)
def reading_data_and_preparing_network(files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing, nb_groups, nb_cl, save_path):
    image_train, label_train, file_string = utils_data.read_data_test(train_path, labels_dic, mixing, files_from_cl=files_from_cl)
    image_batch, label_batch, file_string_batch = tf.train.batch([image_train, label_train, file_string], batch_size=batch_size, num_threads=8)
    label_batch_one_hot = tf.one_hot(label_batch, nb_groups * nb_cl)

    ### Network and loss function  
    mean_img = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
    with tf.variable_scope('ResNet18'):
        with tf.device('/gpu:'+gpu):
            scores         = utils_resnet.ResNet18(image_batch-mean_img, phase='test',num_outputs=nb_cl*nb_groups)
            graph          = tf.get_default_graph()
            op_feature_map = graph.get_operation_by_name('ResNet18/pool_last/avg').outputs[0]

    loss_class = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch_one_hot, logits=scores))

    ### Initialization
    params = dict(cPickle.load(open(save_path+'model-iteration'+str(nb_cl)+'-%i.pickle' % itera)))
    inits  = utils_resnet.get_weight_initializer(params)

    return inits, scores, label_batch, loss_class, file_string_batch, op_feature_map
exported_model.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def restore_model(self):
        # Load meta graph and learned weights
        saver = tf.train.import_meta_graph(self.export_dir + self.name + '.meta')
        saver.restore(self.session, tf.train.latest_checkpoint(self.export_dir))

        # Get input and output nodes
        graph = tf.get_default_graph()
        self.input = graph.get_tensor_by_name("input_node:0")
        self.input_len = graph.get_tensor_by_name("input_lengths:0")
        self.output = graph.get_tensor_by_name("output_node:0")
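Once the tensors have been fetched by name, inference is a plain session.run. A hypothetical usage sketch; the checkpoint path and the dummy input shape are assumptions, not the project's real preprocessing:

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    saver = tf.train.import_meta_graph('export/model.meta')   # placeholder path
    saver.restore(sess, tf.train.latest_checkpoint('export/'))
    graph = tf.get_default_graph()
    features = np.zeros((1, 100, 26), dtype=np.float32)       # dummy input batch
    decoded = sess.run(graph.get_tensor_by_name('output_node:0'),
                       feed_dict={graph.get_tensor_by_name('input_node:0'): features,
                                  graph.get_tensor_by_name('input_lengths:0'): [100]})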
skip_rnn_cells.py (project: skiprnn-2017-telecombcn, author: imatge-upc)
def _binary_round(x):
    """
    Rounds a tensor whose values are in [0, 1] to a tensor with values in {0, 1},
    using the straight-through estimator for the gradient.

    Based on http://r2rt.com/binary-stochastic-neurons-in-tensorflow.html

    :param x: input tensor
    :return: y=round(x) with gradients defined by the identity mapping (y=x)
    """
    g = tf.get_default_graph()

    with ops.name_scope("BinaryRound") as name:
        with g.gradient_override_map({"Round": "Identity"}):
            return tf.round(x, name=name)
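No custom gradient registration is needed here: TensorFlow already ships a gradient registered under the name 'Identity', so the override simply routes Round through it. A quick sketch checking the straight-through behavior (assumes _binary_round from the snippet above is in scope):

import tensorflow as tf

x = tf.constant([0.2, 0.8])
y = _binary_round(x)             # forward pass: [0., 1.]
dy_dx = tf.gradients(y, x)[0]    # backward pass: identity, i.e. [1., 1.]
with tf.Session() as sess:
    print(sess.run([y, dy_dx]))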
hooks.py (project: seq2seq, author: google)
def after_run(self, _run_context, run_values):
    if not self.is_chief or self._done:
      return

    step_done = run_values.results
    if self._active:
      tf.logging.info("Captured full trace at step %s", step_done)
      # Create output directory
      gfile.MakeDirs(self._output_dir)

      # Save run metadata
      trace_path = os.path.join(self._output_dir, "run_meta")
      with gfile.GFile(trace_path, "wb") as trace_file:
        trace_file.write(run_values.run_metadata.SerializeToString())
        tf.logging.info("Saved run_metadata to %s", trace_path)

      # Save timeline
      timeline_path = os.path.join(self._output_dir, "timeline.json")
      with gfile.GFile(timeline_path, "w") as timeline_file:
        tl_info = timeline.Timeline(run_values.run_metadata.step_stats)
        tl_chrome = tl_info.generate_chrome_trace_format(show_memory=True)
        timeline_file.write(tl_chrome)
        tf.logging.info("Saved timeline to %s", timeline_path)

      # Save tfprof op log
      tf.contrib.tfprof.tfprof_logger.write_op_log(
          graph=tf.get_default_graph(),
          log_dir=self._output_dir,
          run_meta=run_values.run_metadata)
      tf.logging.info("Saved op log to %s", self._output_dir)
      self._active = False
      self._done = True

    self._active = (step_done >= self.params["step"])
hooks.py (project: seq2seq, author: google)
def begin(self):
    # Dump to file on the chief worker
    if self.is_chief:
      opts = tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
      opts['dump_to_file'] = os.path.abspath(self._filename)
      tf.contrib.tfprof.model_analyzer.print_model_analysis(
          tf.get_default_graph(), tfprof_options=opts)

    # Print the model analysis
    with gfile.GFile(self._filename) as f:
      tf.logging.info(f.read())
cifar10_eval.py (project: ml, author: hohoins)
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
benchmark_cnn.py (project: benchmarks, author: tensorflow)
def _eval_cnn(self):
    """Evaluate a model every self.params.eval_interval_secs.

    Returns:
      Dictionary containing eval statistics. Currently returns an empty
      dictionary.
    """
    (image_producer_ops, enqueue_ops, fetches) = self._build_model()
    saver = tf.train.Saver(self.variable_mgr.savable_variables())
    summary_writer = tf.summary.FileWriter(self.params.eval_dir,
                                           tf.get_default_graph())
    target = ''
    local_var_init_op = tf.local_variables_initializer()
    variable_mgr_init_ops = [local_var_init_op]
    with tf.control_dependencies([local_var_init_op]):
      variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())
    local_var_init_op_group = tf.group(*variable_mgr_init_ops)
    summary_op = tf.summary.merge_all()
    # TODO(huangyp): Check if checkpoints haven't updated for hours and abort.
    while True:
      self._eval_once(saver, summary_writer, target, local_var_init_op_group,
                      image_producer_ops, enqueue_ops, fetches, summary_op)
      if self.params.eval_interval_secs <= 0:
        break
      time.sleep(self.params.eval_interval_secs)
    return {}
model.py (project: cxflow-tensorflow, author: Cognexa)
def _find_or_raise(self, tensor_name: str) -> tf.Tensor:
        """
        Find the tensor with the given name in the default graph or raise an exception.
        :param tensor_name: name of the tensor to find
        :return: tf.Tensor
        """
        full_name = self._get_full_name(tensor_name)
        try:
            return tf.get_default_graph().get_tensor_by_name(full_name)
        except (KeyError, ValueError, TypeError) as ex:
            raise ValueError('Tensor `{}` with full name `{}` was not found.'.format(tensor_name, full_name)) from ex
utilities.py (project: sea-lion-counter, author: rdinse)
def get_node(name):
  return tf.get_default_graph().as_graph_element(name.split(":")[0])
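Stripping the ':<index>' suffix makes as_graph_element resolve to the Operation rather than one of its output tensors. A usage sketch (the op name is borrowed from the iCaRL snippet above, purely as an illustration):

op = get_node('ResNet18/pool_last/avg:0')   # returns the Operation, not the Tensor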
BaseModel.py (project: kaggle-review, author: daxiongshu)
def _batch_normalization(self, x, layer_name, eps=0.001):
        with tf.variable_scope(layer_name.split('/')[-1]):
            beta, gamma, mean, variance = self._get_batch_normalization_weights(layer_name)
            # beta, gamma, mean, variance are numpy arrays!!!

            if beta is None:
                try:
                    net = tf.layers.batch_normalization(x, epsilon = eps)
                except:
                    net = tf.nn.batch_normalization(x, 0, 1, 0, 1, 0.01)
            else:
                try:
                    net = tf.layers.batch_normalization(x, epsilon = eps,        
                        beta_initializer = tf.constant_initializer(value=beta,dtype=tf.float32),
                        gamma_initializer = tf.constant_initializer(value=gamma,dtype=tf.float32),
                        moving_mean_initializer = tf.constant_initializer(value=mean,dtype=tf.float32),
                        moving_variance_initializer = tf.constant_initializer(value=variance,dtype=tf.float32), 
                    )
                except:
                    net = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.01)
        mean = '%s/batch_normalization/moving_mean:0'%(layer_name)
        variance = '%s/batch_normalization/moving_variance:0'%(layer_name)
        try:
            tf.add_to_collection(tf.GraphKeys.SAVE_TENSORS, tf.get_default_graph().get_tensor_by_name(mean))
            tf.add_to_collection(tf.GraphKeys.SAVE_TENSORS, tf.get_default_graph().get_tensor_by_name(variance))
        except:
            pass
        return net
test_base.py (project: tfutils, author: neuroailab)
def get_extraction_target(inputs, outputs, to_extract, **loss_params):
        """Produce validation target function.

        Example validation target function to use to provide targets for extracting features.
        This function also adds a standard "loss" target, which you may or may not want.

        The to_extract argument must be a dictionary of the form
              {name_for_saving: name_of_actual_tensor, ...}
        where the "name_for_saving" is a human-friendly name you want to save extracted
        features under, and name_of_actual_tensor is a name of the tensor in the tensorflow
        graph outputing the features desired to be extracted.  To figure out what the names
        of the tensors you want to extract are "to_extract" argument,  uncomment the
        commented-out lines, which will print a list of all available tensor names.

        """
        names = [[x.name for x in op.values()] for op in tf.get_default_graph().get_operations()]
        names = [y for x in names for y in x]

        r = re.compile(r'__GPU__\d/')
        _targets = defaultdict(list)

        for name in names:
            name_without_gpu_prefix = r.sub('', name)
            for save_name, actual_name in to_extract.items():
                if actual_name in name_without_gpu_prefix:
                    tensor = tf.get_default_graph().get_tensor_by_name(name)
                    _targets[save_name].append(tensor)

        targets = {k: tf.concat(v, axis=0) for k, v in _targets.items()}
        targets['loss'] = utils.get_loss(inputs, outputs, **loss_params)
        return targets
test.py (project: tfutils, author: neuroailab)
def get_extraction_target(inputs, outputs, to_extract, **loss_params):
    """Produce validation target function.

    Example validation target function to use to provide targets for extracting features.
    This function also adds a standard "loss" target, which you may or may not want.

    The to_extract argument must be a dictionary of the form
          {name_for_saving: name_of_actual_tensor, ...}
    where the "name_for_saving" is a human-friendly name you want to save extracted
    features under, and name_of_actual_tensor is a name of the tensor in the tensorflow
    graph outputing the features desired to be extracted.  To figure out what the names
    of the tensors you want to extract are "to_extract" argument,  uncomment the
    commented-out lines, which will print a list of all available tensor names.

    """
    names = [[x.name for x in op.values()] for op in tf.get_default_graph().get_operations()]
    names = [y for x in names for y in x]

    r = re.compile(r'__GPU__\d/')
    _targets = defaultdict(list)

    for name in names:
        name_without_gpu_prefix = r.sub('', name)
        for save_name, actual_name in to_extract.items():
            if actual_name in name_without_gpu_prefix:
                tensor = tf.get_default_graph().get_tensor_by_name(name)
                _targets[save_name].append(tensor)

    targets = {k: tf.concat(v, axis=0) for k, v in _targets.items()}
    targets['loss'] = utils.get_loss(inputs, outputs, **loss_params)
    return targets
model.py (project: tfutils, author: neuroailab)
def _reuse_scope_name(self, name):
        graph = tf.get_default_graph()
        if graph._name_stack:
            name = graph._name_stack + '/' + name + '/'  # this will reuse the already-created scope
        else:
            name += '/'
        return name
freezemodel.py (project: DmsMsgRcg, author: bshao001)
def freeze(model_scope, model_dir, model_file):
    """
    Args:
        model_scope: The prefix of all variables in the model.
        model_dir: The full path to the folder in which the result file is located.
        model_file: The file that stores the training results, without file suffix / extension.
    """
    saver = tf.train.import_meta_graph(os.path.join(model_dir, model_file + ".meta"))
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    with tf.Session() as sess:
        saver.restore(sess, os.path.join(model_dir, model_file))

        print("# All operations:")
        for op in graph.get_operations():
            print(op.name)

        output_node_names = [v.name.split(":")[0] for v in tf.trainable_variables()]
        output_node_names.append("{}/readout/logits".format(model_scope))
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            input_graph_def,
            output_node_names
        )

        output_file = os.path.join(model_dir, model_file + ".pb")
        with tf.gfile.GFile(output_file, "wb") as f:
            f.write(output_graph_def.SerializeToString())

        print("Freezed model was saved as {}.pb.".format(model_file))
convert.py (project: tfplus, author: renmengye)
def save_graph(save_path):
    graph = tf.get_default_graph()
    graph_def = graph.as_graph_def()
    print "graph_def byte size", graph_def.ByteSize()
    graph_def_s = graph_def.SerializeToString()

    with open(save_path, "wb") as f:
        f.write(graph_def_s)

    print "saved model to %s" % save_path
resnet_test.py (project: tfplus, author: renmengye)
def load_old_model(sess, nlayers, device='/cpu:0'):
    with tf.device(device):
        new_saver = tf.train.import_meta_graph(meta_fn(nlayers))
    new_saver.restore(sess, checkpoint_fn(nlayers))
    graph = tf.get_default_graph()
    prob_tensor = graph.get_tensor_by_name("prob:0")
    images = graph.get_tensor_by_name("images:0")
    return graph, images, prob_tensor
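A hypothetical usage sketch; the already-open session and the 224x224 RGB input shape are assumptions about the checkpointed ResNet:

import numpy as np

batch = np.zeros((1, 224, 224, 3), dtype=np.float32)   # dummy image batch
graph, images, prob_tensor = load_old_model(sess, nlayers=50)
probs = sess.run(prob_tensor, feed_dict={images: batch})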

