test.py source code

python

Project: sdp    Author: tansey
# NOTE: this listing shows only the constructor; the enclosing class definition is omitted.
# It requires NumPy and TensorFlow 1.x, plus the project's own helpers
# (tf_get_delta, get_sparse_penalty_matrix, batch_sparse_tensor_dense_matmul).
import numpy as np
import tensorflow as tf

def __init__(self, length, k, lam, neighbor_radius):
        with tf.variable_scope(type(self).__name__):
            self.length = length
            # Trend filtering setup
            self.k = k
            self.neighbor_radius = neighbor_radius
            self.neighborhood_size = 2 * self.neighbor_radius + 1
            self.lam = lam * length / (self.neighborhood_size**2)
            self.D = tf_get_delta(get_sparse_penalty_matrix((self.neighborhood_size,)), k) # Local patch to smooth
            # Multiscale setup
            self.bins = [np.arange(self.length)]
            self.num_nodes = int(2**np.ceil(np.log2(self.length))) - 1
            self.path_length = int(np.ceil(np.log2(length)))
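            # The support is covered by a balanced binary tree: num_nodes internal split nodes,
            # each parameterized by one logit in q below, and path_length splits on every
            # root-to-leaf path.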
            # Binomial likelihoods loss function
            self.q_indices = tf.placeholder(tf.int32, [None, self.path_length])
            self.splits = tf.placeholder(tf.float32, [None, self.path_length])
            self.q = tf.Variable([0.]*self.num_nodes)
            self.sampled_q = tf.gather(self.q, self.q_indices)
            self.sampled_probs = tf.reciprocal(1 + tf.exp(-self.sampled_q))
            self.log_left_probs = self.splits * tf.log(tf.clip_by_value(self.sampled_probs, 1e-10, 1.0))
            self.log_right_probs = (1 - self.splits) * tf.log(tf.clip_by_value(1 - self.sampled_probs, 1e-10, 1.0))
            self.log_probs = tf.reduce_mean(-tf.reduce_sum(self.log_left_probs+self.log_right_probs, axis=[1]))
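            # Loss: mean negative Bernoulli log-likelihood of the observed left/right split
            # fractions under the sigmoid-transformed node logits along each sampled path;
            # probabilities are clipped away from zero so the logs stay finite.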
            # Smooth a local patch centered on the target variables
            self.neighborhood_indexes = tf.placeholder(tf.int32, [None, self.neighborhood_size, self.path_length])
            self.neighborhood_splits = tf.placeholder(tf.float32, [None, self.neighborhood_size, self.path_length])
            self.neighborhood_q = tf.gather(self.q, self.neighborhood_indexes)
            self.neighborhood_probs = tf.reciprocal(1 + tf.exp(-self.neighborhood_q))
            self.neighborhood_log_left = self.neighborhood_splits * tf.log(tf.clip_by_value(self.neighborhood_probs, 1e-10, 1.0))
            self.neighborhood_log_right = (1 - self.neighborhood_splits) * tf.log(tf.clip_by_value(1 - self.neighborhood_probs, 1e-10, 1.0))
            self.neighborhood_log_probs = tf.reduce_sum(self.neighborhood_log_left+self.neighborhood_log_right, axis=[2])
            self.reg = tf.reduce_sum(tf.abs(batch_sparse_tensor_dense_matmul(self.D, tf.expand_dims(self.neighborhood_log_probs, -1))))
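            # Regularizer: apply the k-th order difference operator D across the neighborhood's
            # per-position log-likelihoods and take the L1 norm, a trend-filtering penalty that
            # encourages the local density patch to vary smoothly.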
            # Add the loss and regularization penalty together
            self.loss = self.log_probs + self.lam * self.reg
            self.sampled_density = tf.reduce_prod(tf.where(self.splits > 0, self.sampled_probs, 1 - self.sampled_probs), axis=[1])
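
The constructor only builds the TensorFlow graph; each training sample must already be encoded as a root-to-leaf path through the dyadic tree (the q_indices and splits placeholders). Below is a minimal sketch of one way such an encoding could be produced — the function name dyadic_path and the heap-style node numbering (root 0, children 2*i+1 and 2*i+2) are assumptions for illustration, not the project's actual encoder.

import numpy as np

def dyadic_path(x, length):
    """Encode integer x in [0, length) as a root-to-leaf path in a complete binary tree.

    Returns (indices, splits): `indices` are heap-ordered internal-node ids and `splits`
    is 1.0 when the path goes left, else 0.0. The heap ordering is an assumed convention,
    used only to illustrate the shapes fed into q_indices / splits above.
    """
    depth = int(np.ceil(np.log2(length)))      # matches path_length in the constructor
    n_leaves = 2 ** depth                      # the tree has 2**depth - 1 internal nodes
    node, lo, hi = 0, 0, n_leaves
    indices, splits = [], []
    for _ in range(depth):
        mid = (lo + hi) // 2
        go_left = x < mid
        indices.append(node)
        splits.append(1.0 if go_left else 0.0)
        node = 2 * node + 1 if go_left else 2 * node + 2
        lo, hi = (lo, mid) if go_left else (mid, hi)
    return np.array(indices, dtype=np.int32), np.array(splits, dtype=np.float32)

idx, spl = dyadic_path(5, length=16)
print(idx)   # node ids, one per level of the depth-4 tree
print(spl)   # left/right indicators along the path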