Example source code for Python's set_random_seed()
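
The snippets below are collected from open-source projects and show typical call sites for tf.set_random_seed(). For orientation, here is a minimal standalone sketch (TF 1.x API; my own example, not taken from any project below) of what the graph-level seed guarantees: two freshly built, identical graphs with the same graph-level seed produce identical random draws.

import numpy as np
import tensorflow as tf

def sample_once(seed):
    """Build a fresh graph, set its graph-level seed, and draw one tensor."""
    with tf.Graph().as_default():
        tf.set_random_seed(seed)  # graph-level seed; op seeds are derived from it
        x = tf.random_normal([3])
        with tf.Session() as sess:
            return sess.run(x)

# Same graph-level seed, same graph structure => identical samples.
assert np.allclose(sample_once(7), sample_once(7))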

test_vbn.py (project: tefla, author: openAGI)
def test_statistics(self):
        """Check that `_statistics` gives the same result as `nn.moments`."""
        tf.set_random_seed(1234)

        tensors = tf.random_normal([4, 5, 7, 3])
        for axes in [(3,), (0, 2), (1, 2, 3)]:  # (3,) is a one-element tuple; bare (3) is just the int 3
            vb_mean, mean_sq = virtual_batchnorm._statistics(tensors, axes)
            mom_mean, mom_var = tf.nn.moments(tensors, axes)
            vb_var = mean_sq - tf.square(vb_mean)

            with self.test_session(use_gpu=True) as sess:
                vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
                    vb_mean, vb_var, mom_mean, mom_var])

            self.assertAllClose(mom_mean_np, vb_mean_np)
            self.assertAllClose(mom_var_np, vb_var_np)
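
The test leans on the identity Var[x] = E[x^2] - (E[x])^2: _statistics returns the mean and the mean of squares, and the variance is recovered by subtracting the squared mean. A small NumPy sketch of the same identity (my own check, not part of the test file):

import numpy as np

x = np.random.randn(4, 5, 7, 3)
axes = (1, 2, 3)
mean, mean_sq = x.mean(axis=axes), (x ** 2).mean(axis=axes)
# Var[x] = E[x^2] - (E[x])^2 agrees with numpy's direct variance.
assert np.allclose(mean_sq - mean ** 2, x.var(axis=axes))
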
test_vbn.py (project: tefla, author: openAGI)
def test_reference_batch_normalization(self):
        """Check that batch norm from VBN agrees with opensource implementation."""
        tf.set_random_seed(1234)

        batch = tf.random_normal([6, 5, 7, 3, 3])

        for axis in range(5):
            # Get `layers` batchnorm result.
            bn_normalized = tf.layers.batch_normalization(
                batch, axis, training=True)

            # Get VBN's batch normalization on reference batch.
            batch_axis = 0 if axis != 0 else 1  # axis and batch_axis can't be the same
            vbn = virtual_batchnorm.VBN(batch, axis, batch_axis=batch_axis)
            vbn_normalized = vbn.reference_batch_normalization()

            with self.test_session(use_gpu=True) as sess:
                tf.global_variables_initializer().run()

                bn_normalized_np, vbn_normalized_np = sess.run(
                    [bn_normalized, vbn_normalized])
            self.assertAllClose(bn_normalized_np, vbn_normalized_np)
test_vbn.py (project: tefla, author: openAGI)
def test_same_as_batchnorm(self):
        """Check that batch norm on set X is the same as ref of X / y on `y`."""
        tf.set_random_seed(1234)

        num_examples = 4
        examples = [tf.random_normal([5, 7, 3]) for _ in
                    range(num_examples)]

        # Get the result of the open-source batch normalization.
        batch_normalized = tf.layers.batch_normalization(
            tf.stack(examples), training=True)

        for i in range(num_examples):
            examples_except_i = tf.stack(examples[:i] + examples[i + 1:])
            # Get the result of VBN's batch normalization.
            vbn = virtual_batchnorm.VBN(examples_except_i)
            vb_normed = tf.squeeze(
                vbn(tf.expand_dims(examples[i], 0)), [0])

            with self.test_session(use_gpu=True) as sess:
                tf.global_variables_initializer().run()
                bn_np, vb_np = sess.run([batch_normalized, vb_normed])
            self.assertAllClose(bn_np[i, ...], vb_np)
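
What this test exercises: virtual batch normalization normalizes an example with the statistics of the reference batch combined with that example, so VBN built on X \ {y} and applied to y reproduces plain batch norm over the full set X (ignoring the learned scale and offset, which start at identity). A NumPy sketch of that argument (my own, with a hypothetical epsilon):

import numpy as np

x = np.random.randn(4, 5)    # the full set X, four examples
ref, y = x[1:], x[0]         # reference batch X \ {y} and the held-out example y
eps = 1e-3                   # hypothetical variance epsilon

# VBN statistics for y: moments of the reference batch combined with y,
# which is exactly the full set X.
combined = np.vstack([y[None], ref])
mu, var = combined.mean(0), combined.var(0)
vbn_y = (y - mu) / np.sqrt(var + eps)

# Plain batch norm over X gives the same normalized value for example y.
bn = (x - x.mean(0)) / np.sqrt(x.var(0) + eps)
assert np.allclose(bn[0], vbn_y)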
dm_main.py (project: deep-makeover, author: david-gpu)
def _setup_tensorflow():
    # Create session
    config = tf.ConfigProto(log_device_placement=False) #, intra_op_parallelism_threads=1)
    sess   = tf.Session(config=config)

    # Initialize all RNGs with a deterministic seed
    with sess.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)

    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    return sess


# TBD: Move to dm_train.py?
ddpg.py (project: deep_portfolio, author: deependersingla)
def main(_):
    with tf.Session() as sess:
        # Seed numpy and TensorFlow before the environment draws any samples.
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)
        env = EquityEnvironment(assets, look_back, episode_length, look_back_reinforcement, price_series, train=True)

        state_dim = num_inputs
        action_dim = num_actions
        action_bound = num_action_bound
        # Ensure action bound is symmetric
        # assert (env.action_space.high == -env.action_space.low)

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU)

        critic = CriticNetwork(sess, state_dim, action_dim,
                               CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars())

        actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))

        train(sess, env, actor, critic, actor_noise)
resnet_v1_test.py (project: lsdc, author: febert)
def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with slim.arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(inputs, None, global_pool=False,
                                           output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(inputs, None, global_pool=False)
            sess.run(tf.initialize_all_variables())
            self.assertAllClose(output.eval(), expected.eval(),
                                atol=1e-4, rtol=1e-4)
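
The factor arithmetic is what makes the two feature maps comparable: features extracted at output_stride s and then subsampled by nominal_stride // s land on the same 1/32-resolution grid as the nominal network. A tiny standalone check of that bookkeeping:

nominal_stride = 32
for output_stride in [4, 8, 16, 32]:
    factor = nominal_stride // output_stride
    # Subsampling by `factor` returns the denser features to 1/32 resolution.
    assert output_stride * factor == nominal_stride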
resnet_v2_test.py (project: lsdc, author: febert)
def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with slim.arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(inputs, None, global_pool=False,
                                           output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(inputs, None, global_pool=False)
            sess.run(tf.initialize_all_variables())
            self.assertAllClose(output.eval(), expected.eval(),
                                atol=1e-4, rtol=1e-4)
learning_test.py (project: lsdc, author: febert)
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(
          total_loss, optimizer)

      loss = slim.learning.train(
          train_op, self._logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertLess(loss, .1)
learning_test.py (project: lsdc, author: febert)
def testTrainWithNonDefaultGraph(self):
    self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs8/')
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

    loss = slim.learning.train(
        train_op, self._logdir, number_of_steps=300, log_every_n_steps=10,
        graph=g)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
learning_test.py (project: lsdc, author: febert)
def testTrainWithNoneAsLogdir(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      loss = slim.learning.train(
          train_op, None, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
learning_test.py (project: lsdc, author: febert)
def testTrainWithSessionConfig(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      session_config = tf.ConfigProto(allow_soft_placement=True)
      loss = slim.learning.train(
          train_op,
          None,
          number_of_steps=300,
          log_every_n_steps=10,
          session_config=session_config)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
learning_test.py (project: lsdc, author: febert)
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(ValueError):
        slim.learning.train(
            train_op, None, number_of_steps=300, trace_every_n_steps=10)
learning_test.py (project: lsdc, author: febert)
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
    self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs_/')
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)
      saver = tf.train.Saver()

      with self.assertRaises(ValueError):
        slim.learning.train(
            train_op, None, init_op=None, number_of_steps=300, saver=saver)
learning_test.py (project: lsdc, author: febert)
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs_/')
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(
          total_loss, optimizer)

      with self.assertRaises(RuntimeError):
        slim.learning.train(
            train_op, self._logdir, init_op=None, number_of_steps=300)
learning_test.py (project: lsdc, author: febert)
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      loss = slim.learning.train(
          train_op, self._logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
run_test.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def run():
    if len(sys.argv) < 3:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)
    test_data = load_data(sys.argv[2], config.dictionary, config.grammar, config.max_length)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        with tf.device('/cpu:0'):
            model.build()

            test_eval = Seq2SeqEvaluator(model, config.grammar, test_data, 'test', config.reverse_dictionary, beam_size=config.beam_size, batch_size=config.batch_size)
            loader = tf.train.Saver()

            with tf.Session() as sess:
                loader.restore(sess, os.path.join(model_dir, 'best'))

                #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
                #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

                test_eval.eval(sess, save_to_file=True)
eval_output_embeddings.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def run():
    if len(sys.argv) < 4:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Everything Set>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)

    everything_labels, everything_label_lengths = load_programs(config, sys.argv[2])
    test_labels, test_label_lengths = load_programs(config, sys.argv[3])
    #test_labels, test_label_lengths = sample(config.grammar, test_labels, test_label_lengths)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        model.build()
        loader = tf.train.Saver()

        train_bag_of_tokens = bag_of_tokens(config, everything_labels, everything_label_lengths)
        V, mean = pca_fit(train_bag_of_tokens, n_components=2)

        eval_bag_of_tokens = bag_of_tokens(config, test_labels, test_label_lengths)
        transformed = pca_transform(eval_bag_of_tokens, V, mean)

        with tf.Session() as sess:
            loader.restore(sess, os.path.join(model_dir, 'best'))
            transformed = transformed.eval(session=sess)

        programs = reconstruct_programs(test_labels, test_label_lengths, config.grammar.tokens)
        show_pca(transformed, programs)
check_video_id.py (project: youtube-8m, author: wangheda)
def check_video_id():
  tf.set_random_seed(0)  # for reproducibility
  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    # prepare a reader for each single model prediction result
    all_readers = []

    all_patterns = FLAGS.eval_data_patterns
    all_patterns = map(lambda x: x.strip(), all_patterns.strip().strip(",").split(","))
    for i in xrange(len(all_patterns)):
      reader = readers.EnsembleReader(
          feature_names=feature_names, feature_sizes=feature_sizes)
      all_readers.append(reader)

    input_reader = None
    input_data_pattern = None
    if FLAGS.input_data_pattern is not None:
      input_reader = readers.EnsembleReader(
          feature_names=["mean_rgb","mean_audio"], feature_sizes=[1024,128])
      input_data_pattern = FLAGS.input_data_pattern

    if FLAGS.eval_data_patterns == "":
      raise IOError("'eval_data_patterns' was not specified. " +
                     "Nothing to evaluate.")

    build_graph(
        all_readers=all_readers,
        input_reader=input_reader,
        input_data_pattern=input_data_pattern,
        all_eval_data_patterns=all_patterns,
        batch_size=FLAGS.batch_size)

    logging.info("built evaluation graph")
    video_id_equal = tf.get_collection("video_id_equal")[0]
    input_distance = tf.get_collection("input_distance")[0]

    check_loop(video_id_equal, input_distance, all_patterns)
misc_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def set_global_seeds(i):
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i)
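
A minimal usage sketch (hypothetical call site): the helper seeds all three RNG sources, and the try/except lets it run even where TensorFlow is not installed. Re-seeding makes the Python and NumPy streams repeat:

import random
import numpy as np

set_global_seeds(0)
a = (random.random(), np.random.rand())

set_global_seeds(0)
b = (random.random(), np.random.rand())
assert a == b  # identical draws after re-seeding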
env.py (project: bnn-analysis, author: myshkov)
def __init__(self):
        """ Creates a new Env object. """
        # set seeds
        self.seed = 2305
        np.random.seed(self.seed)
        tf.set_random_seed(self.seed)

        # test case
        self.env_name = None  # name of the environment
        self.model_name = None  # name of the model
        self.test_case_name = 'test'  # name of the test
        self.baseline_test_case_name = None  # name of the test containing 'true' posterior
        self.data_dir = None

        # data
        self.input_dim = None  # number of features
        self.output_dim = None
        self.data_size = None  # number of rows

        self.n_splits = 10
        self.current_split = 0
        self.train_x = list()
        self.train_y = list()
        self.test_x = list()
        self.test_y = list()

        # common model/sampler parameters
        self.layers_description = None
        self.model_parameters_size = None
        self.batch_size = 10
        self.chains_num = 1  # number of models to run in parallel; parameters are per chain
        self.n_chunks = 100  # samples are drawn and stored in chunks
        self.n_samples = 100  # samples per chunk
        self.thinning = 0  # number of samples to discard between kept samples

        self.sampler = None  # sampler created for current split
        self.sampler_factory = None

        # other
        self._log_handler = None

