Example source code for Python constant()

problems.py (project: onsager_deep_learning, author: mborgerding)
def random_access_problem(which=1):
    import math
    import numpy as np
    import tensorflow as tf
    import raputil as ru
    if which == 1:
        opts = ru.Problem.scenario1()
    else:
        opts = ru.Problem.scenario2()

    p = ru.Problem(**opts)
    x1 = p.genX(1)
    y1 = p.fwd(x1)
    A = p.S
    M,N = A.shape
    nbatches = int(math.ceil(1000 / x1.shape[1]))
    prob = NumpyGenerator(p=p, nbatches=nbatches, A=A, opts=opts, iid=(which == 1))
    if which == 2:
        prob.maskX_ = tf.expand_dims(tf.constant((np.arange(N) % (N//2) < opts['Nu']).astype(np.float32)), 1)

    _,prob.noise_var = p.add_noise(y1)

    unused = p.genYX(nbatches) # for legacy reasons -- want to compare against a previous run
    (prob.yval, prob.xval) = p.genYX(nbatches)
    (prob.yinit, prob.xinit) = p.genYX(nbatches)
    import multiprocessing as mp
    prob.nsubprocs = mp.cpu_count()
    return prob
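A hypothetical driver for the function above (the `raputil` module and the `NumpyGenerator` class come from the onsager_deep_learning project and are assumed to be importable):

# Hypothetical usage; raputil and NumpyGenerator are project-specific.
prob = random_access_problem(which=2)
print('noise variance:', prob.noise_var)
print('validation shapes:', prob.yval.shape, prob.xval.shape)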
shrinkage.py (project: onsager_deep_learning, author: mborgerding)
import numpy as np
import tensorflow as tf

def pwlin_grid(r_, rvar_, theta_, dtheta=.75):
    """Piecewise linear shrinkage with noise-adaptive grid spacing.
    Returns xhat, dxdr
    where
        q = r/dtheta/sqrt(rvar)
        xhat = r * interp(q, theta)

    All but the last dimension of theta must broadcast to r_,
    e.g. r.shape = (500,1000) is compatible with theta.shape = (500,1,7).
    """
    ntheta = int(theta_.get_shape()[-1])
    scale_ = dtheta / tf.sqrt(rvar_)
    ars_ = tf.clip_by_value(tf.expand_dims(tf.abs(r_) * scale_, -1), 0.0, ntheta - 1.0)
    centers_ = tf.constant(np.arange(ntheta), dtype=tf.float32)
    outer_distance_ = tf.maximum(0., 1.0 - tf.abs(ars_ - centers_))  # distance-based weight for the nearest bin center(s), along a new last dimension
    gain_ = tf.reduce_sum(theta_ * outer_distance_, axis=-1)  # apply the learnable per-bin gain
    xhat_ = gain_ * r_
    dxdr_ = tf.gradients(xhat_, r_)[0]
    return (xhat_, dxdr_)
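A minimal usage sketch, assuming TensorFlow 1.x graph mode; the shapes and the 7-bin theta_ below are invented for illustration:

r_ = tf.placeholder(tf.float32, (None, 1000))       # estimates to shrink
rvar_ = tf.placeholder(tf.float32, (None, 1))       # their noise variances
theta_ = tf.Variable(np.ones(7, dtype=np.float32))  # 7 learnable per-bin gains

xhat_, dxdr_ = pwlin_grid(r_, rvar_, theta_)        # both have the shape of r_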
tfinterp.py (project: onsager_deep_learning, author: mborgerding)
def interp1d_(xin_, xp, yp_):
    """
    Interpolate a uniformly sampled piecewise linear function, mapping elements
    of xin_ to the result. Input values are clipped to the range of xp.
        xin_ : input tensor (real)
        xp   : x grid (constant -- must be a 1d numpy array, uniformly spaced)
        yp_  : tensor of the result values at the gridpoints xp
    """
    import numpy as np
    import tensorflow as tf
    x_ = tf.clip_by_value(xin_, xp.min(), xp.max())
    dx = xp[1] - xp[0]
    assert len(xp.shape) == 1, 'only 1d interpolation'
    assert xp.shape[0] == int(yp_.get_shape()[0])
    assert abs(np.diff(xp)/dx - 1.0).max() < 1e-6, 'must be uniformly sampled'

    x1_ = tf.expand_dims(x_, -1)
    dt = yp_.dtype
    wt_ = tf.maximum(tf.constant(0., dtype=dt), 1 - abs(x1_ - tf.constant(xp, dtype=dt))/dx)  # triangular interpolation weights
    y_ = tf.reduce_sum(wt_ * yp_, axis=-1)
    return y_
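A small self-contained check (TF 1.x session style; the quadratic test function is arbitrary):

import numpy as np
import tensorflow as tf

xp = np.linspace(-1.0, 1.0, 5)                  # uniform grid
yp_ = tf.constant(xp**2, dtype=tf.float32)      # values at the gridpoints
y_ = interp1d_(tf.constant([0.25, 0.75]), xp, yp_)
with tf.Session() as sess:
    print(sess.run(y_))  # [0.125, 0.625]: piecewise-linear approximation of x**2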
models.py (project: PyMDNet, author: HungWei-Andy)
def __init__(self, config):
    self.layers  = {}
    self.weights = {}
    self.biases  = {}
    self.losses  = {}
    self.regular_losses = {}
    self.trainable = {}
    self.summaries = {}

    # set parameters
    self.lr_rates = {}
    for key, val in config.lr_rates.items():
      self.lr_rates[key] = tf.get_variable('lr_rates/'+key, initializer=tf.constant(val), dtype=tf.float32)

    self.momentum = tf.get_variable('momentum', initializer=tf.constant(config.momentum), dtype=tf.float32)
    self.weight_decay = tf.get_variable('weight_decay', initializer=tf.constant(config.weight_decay), dtype=tf.float32)
    self.lr_rate = tf.get_variable('lr_rate', initializer=tf.constant(config.lr_rate), dtype=tf.float32)
ttbb_models.py (project: shalo, author: henryre)
def _embed_sentences(self):
        """Tensorflow implementation of Simple but Tough-to-Beat Baseline"""
        # Get word features
        word_embeddings = self._get_embedding()
        word_feats      = tf.nn.embedding_lookup(word_embeddings, self.input)
        # Get marginal estimates and scaling term
        batch_size = tf.shape(word_feats)[0]
        a = tf.pow(10.0, self._get_a_exp())
        p = tf.constant(self.marginals, dtype=tf.float32, name='marginals')
        q = tf.reshape(
            a / (a + tf.nn.embedding_lookup(p, self.input)),
            (batch_size, self.mx_len, 1)
        )
        # Compute initial sentence embedding
        z = tf.reshape(1.0 / tf.to_float(self.input_lengths), (batch_size, 1))
        S = z * tf.reduce_sum(q * word_feats, axis=1)
        # Compute common component
        S_centered = S - tf.reduce_mean(S, axis=0)
        _, _, V = tf.svd(S_centered, full_matrices=False, compute_uv=True)
        self.tf_ccx = tf.stop_gradient(tf.gather(tf.transpose(V), 0))
        # Common component removal
        ccx = tf.reshape(self._get_common_component(), (1, self.d))
        sv = {'embeddings': word_embeddings, 'a': a, 'p': p, 'ccx': ccx}
        return S - tf.matmul(S, ccx * tf.transpose(ccx)), sv
shalo_base.py (project: shalo, author: henryre)
def _get_embedding(self):
        """
        Return embedding tensor (either constant or variable)
        Row 0 is 0 vector for no token
        Row 1 is random initialization for UNKNOWN
        Rows 2 : 2 + len(self.embedding_words) are pretrained initialization
        Remaining rows are random initialization
        """
        zero = tf.constant(0.0, dtype=tf.float32, shape=(1, self.d))
        s = self.seed - 1
        unk = tf.Variable(tf.random_normal((1, self.d), stddev=SD, seed=s))
        pretrain = tf.Variable(self.embeddings_train, dtype=tf.float32)
        vecs = [zero, unk, pretrain]
        n_r = self.word_dict.num_words() - len(self.embedding_words_train)
        if n_r > 0:
            r = tf.Variable(tf.random_normal((n_r, self.d), stddev=SD, seed=s))
            vecs.append(r)
        self.U = tf.concat(vecs, axis=0, name='embedding_matrix')
        return self.U
losses.py (project: acdc_segmenter, author: baumgach)
def pixel_wise_cross_entropy_loss_weighted(logits, labels, class_weights):
    '''
    Weighted cross entropy loss, with a weight per class
    :param logits: Network output before softmax
    :param labels: Ground truth masks
    :param class_weights: A list of the weights for each class
    :return: weighted cross entropy loss
    '''

    n_class = len(class_weights)

    flat_logits = tf.reshape(logits, [-1, n_class])
    flat_labels = tf.reshape(labels, [-1, n_class])

    class_weights = tf.constant(np.array(class_weights, dtype=np.float32))

    weight_map = tf.multiply(flat_labels, class_weights)
    weight_map = tf.reduce_sum(weight_map, axis=1)

    loss_map = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=flat_labels)
    weighted_loss = tf.multiply(loss_map, weight_map)

    loss = tf.reduce_mean(weighted_loss)

    return loss
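A self-contained example with random dummy data (batch size, spatial size, and class count are illustrative; TF 1.x):

import numpy as np
import tensorflow as tf

logits = tf.constant(np.random.randn(2, 4, 4, 3), dtype=tf.float32)     # B,H,W,C
labels = tf.one_hot(np.random.randint(0, 3, size=(2, 4, 4)), depth=3)   # one-hot masks
loss = pixel_wise_cross_entropy_loss_weighted(logits, labels, class_weights=[0.1, 1.0, 1.0])
with tf.Session() as sess:
    print(sess.run(loss))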
data_handler.py (project: tf-crnn, author: solivr)
def image_reading(path: str, resized_size: Tuple[int, int]=None, data_augmentation: bool=False,
                  padding: bool=False) -> Tuple[tf.Tensor, tf.Tensor]:
    # Read image
    image_content = tf.read_file(path, name='image_reader')
    image = tf.cond(tf.equal(tf.string_split([path], '.').values[1], tf.constant('jpg', dtype=tf.string)),
                    true_fn=lambda: tf.image.decode_jpeg(image_content, channels=1, try_recover_truncated=True), # TODO channels = 3 ?
                    false_fn=lambda: tf.image.decode_png(image_content, channels=1), name='image_decoding')

    # Data augmentation
    if data_augmentation:
        image = augment_data(image)

    # Padding
    if padding:
        with tf.name_scope('padding'):
            image, img_width = padding_inputs_width(image, resized_size, increment=CONST.DIMENSION_REDUCTION_W_POOLING)
    # Resize
    else:
        image = tf.image.resize_images(image, size=resized_size)
        img_width = tf.shape(image)[1]

    with tf.control_dependencies([tf.assert_equal(image.shape[:2], resized_size)]):
        return image, img_width
base_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_input_op(self, xavier):
        with tf.variable_scope('embed'):
            # first the embed the input
            if self.config.train_input_embeddings:
                if self.config.input_embedding_matrix:
                    initializer = tf.constant_initializer(self.config.input_embedding_matrix)
                else:
                    initializer = xavier
                input_embed_matrix = tf.get_variable('input_embedding',
                                                     shape=(self.config.dictionary_size, self.config.embed_size),
                                                     initializer=initializer)
            else:
                input_embed_matrix = tf.constant(self.config.input_embedding_matrix)

            # dictionary size x embed_size
            assert input_embed_matrix.get_shape() == (self.config.dictionary_size, self.config.embed_size)

            # now embed the output
            if self.config.train_output_embeddings:
                output_embed_matrix = tf.get_variable('output_embedding',
                                                      shape=(self.config.output_size, self.config.output_embed_size),
                                                      initializer=xavier)
            else:
                output_embed_matrix = tf.constant(self.config.output_embedding_matrix)

            assert output_embed_matrix.get_shape() == (self.config.output_size, self.config.output_embed_size)

        inputs = tf.nn.embedding_lookup([input_embed_matrix], self.input_placeholder)
        # batch size x max length x embed_size
        assert inputs.get_shape()[1:] == (self.config.max_length, self.config.embed_size)
        return inputs, output_embed_matrix
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
    with tf.name_scope("loss_mix"):
      float_labels = tf.cast(labels, tf.float32)
      if FLAGS.support_type=="class":
        seq = np.loadtxt(FLAGS.class_file)
        tf_seq = tf.one_hot(tf.constant(seq,dtype=tf.int32),FLAGS.encoder_size)
        float_classes_org = tf.matmul(float_labels,tf_seq)
        class_true = tf.ones(tf.shape(float_classes_org))
        class_false = tf.zeros(tf.shape(float_classes_org))
        float_classes = tf.where(tf.greater(float_classes_org, class_false), class_true, class_false)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="frequent":
        float_classes = float_labels[:,0:FLAGS.encoder_size]
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="encoder":
        float_classes = float_labels
        for i in range(FLAGS.encoder_layers):
          var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
          weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
          bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
          float_classes = tf.nn.xw_plus_b(float_classes,weight_i,bias_i)
          if i<FLAGS.encoder_layers-1:
            float_classes = tf.nn.relu(float_classes)
          else:
            float_classes = tf.nn.sigmoid(float_classes)
            #float_classes = tf.nn.relu(tf.sign(float_classes - 0.5))
        cross_entropy_class = self.calculate_mseloss(predictions_class,float_classes)
      else:
        float_classes = float_labels
        for i in range(FLAGS.moe_layers-1):
          float_classes = tf.concat((float_classes,float_labels),axis=1)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      cross_entropy_loss = self.calculate_loss(predictions,labels)
      return cross_entropy_loss + 0.1*cross_entropy_class
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
    with tf.name_scope("loss_softmax_mix"):
      vocab_size = labels.get_shape().as_list()[1]
      cross_entropy_class = tf.constant(0.0)
      for i in range(FLAGS.moe_layers):
        predictions_subclass = predictions_class[:,i*vocab_size:(i+1)*vocab_size]
        cross_entropy_class = cross_entropy_class + self.calculate_loss(predictions_subclass,labels)
      cross_entropy_loss = self.calculate_loss(predictions,labels)
      return cross_entropy_loss + 0.1*cross_entropy_class
losses_embedding.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
        with tf.name_scope("loss_xent"):
            epsilon = 10e-6
            origin_labels = tf.cast(labels, tf.float32)
            vocab_size = origin_labels.get_shape().as_list()[1]
            float_labels = tf.tile(tf.reshape(origin_labels,[-1, 1, vocab_size]),[1,FLAGS.top_k,1])
            float_labels = tf.reshape(float_labels,[-1,vocab_size])
            cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
                1 - float_labels) * tf.log(1 - predictions + epsilon)
            cross_entropy_loss = tf.negative(cross_entropy_loss)
            num_labels = tf.minimum(tf.reduce_sum(origin_labels,axis=1),tf.constant(FLAGS.top_k,dtype=tf.float32))
            mask = tf.reshape(tf.sequence_mask(num_labels,tf.constant(FLAGS.top_k,dtype=tf.float32),dtype=tf.float32),[-1])
            cross_entropy_loss = tf.reduce_sum(tf.reduce_sum(cross_entropy_loss, 1)*mask)/(tf.reduce_sum(mask)+epsilon)

            return cross_entropy_loss
classifier_tf.py (project: human-rl, author: gsastry)
import numpy as np

def threshold_from_predictions(y, y_pred, false_positive_margin=0, recall=1):
    """Determines a threshold for classifying examples as positive

    Args:
        y: labels
        y_pred: scores from the classifier
        recall: Threshold is set to classify at least this fraction of positive
            labelled examples as positive
        false_positive_margin: Threshold is set to achieve the desired recall, and
            then is extended to include an additional fraction of negative
            labelled examples equal to false_positive_margin (This allows adding
            a buffer to the threshold while maintaining a constant "cost")
    """
    n_positive = np.count_nonzero(y)

    n_negative = len(y) - n_positive
    if n_positive == 0:
        return np.max(y_pred)
    if false_positive_margin == 0 and recall == 1:
        return np.min(y_pred[y])
    ind = np.argsort(y_pred)
    y_pred_sorted = y_pred[ind]
    y_sorted = y[ind]
    so_far = [0, 0]
    j = 0
    for i in reversed(range(len(y_sorted))):
        so_far[y_sorted[i]] += 1
        if so_far[1] >= int(np.floor(recall * n_positive)):
            j = i
            break
    so_far = [0, 0]
    if false_positive_margin == 0:
        return y_pred_sorted[j]
    k = 0
    for i in reversed(range(j)):
        so_far[y_sorted[i]] += 1
        if so_far[0] >= false_positive_margin * n_negative:
            k = i
            break
    return y_pred_sorted[k]
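A quick sanity check in pure NumPy (values invented for illustration; note the function indexes with y_pred[y] and so_far[y_sorted[i]], so y should be a boolean array):

import numpy as np

y = np.array([False, False, True, True, False, True])
y_pred = np.array([0.10, 0.40, 0.35, 0.80, 0.70, 0.90])
print(threshold_from_predictions(y, y_pred))  # 0.35, the lowest score among positives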
model.py (project: human-rl, author: gsastry)
import numpy as np
import tensorflow as tf

def normalized_columns_initializer(std=1.0):
    def _initializer(shape, dtype=None, partition_info=None):
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        return tf.constant(out)
    return _initializer
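Typical use as a TF 1.x variable initializer (the variable name and shape below are made up):

import tensorflow as tf

# Small std keeps the initial policy logits near zero.
w = tf.get_variable('policy_logits_w', shape=(256, 6),
                    initializer=normalized_columns_initializer(0.01))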
tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
import numpy as np
import tensorflow as tf

def gauss_log_prob(mu, logstd, x):
    var = tf.exp(2 * logstd)
    gp = -tf.square(x - mu) / (2 * var) - .5 * tf.log(tf.constant(2 * np.pi)) - logstd
    return tf.reduce_sum(gp, [1])
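A small sketch of what this computes, assuming TF 1.x: at the mean of a unit-variance diagonal Gaussian, each row evaluates to -0.5 * D * log(2*pi):

import tensorflow as tf

mu = tf.zeros((4, 2))
logstd = tf.zeros((4, 2))           # log std = 0, i.e. unit variance
x = tf.zeros((4, 2))                # evaluate the log-density at the mean
lp = gauss_log_prob(mu, logstd, x)
with tf.Session() as sess:
    print(sess.run(lp))             # ~ -1.8379 = -log(2*pi) for D = 2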
tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def gauss_ent(mu, logstd):
    h = tf.reduce_sum(logstd + tf.constant(0.5*np.log(2*np.pi*np.e), tf.float32))
    return h

