Example source code for Python's where()

train.py (project: youtube-8m, author: wangheda)
def get_weights_by_predictions(labels_batch, predictions):
  epsilon = 1e-6
  float_labels = tf.cast(labels_batch, dtype=tf.float32)
  cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
      1 - float_labels) * tf.log(1 - predictions + epsilon)
  ce = tf.reduce_sum(tf.negative(cross_entropy_loss), axis=1)
  mean_ce = tf.reduce_mean(ce + epsilon)
  weights = tf.where(ce > mean_ce, 
                     3.0 * tf.ones_like(ce),
                     0.5 * tf.ones_like(ce))
  return weights
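
A minimal usage sketch (TensorFlow 1.x assumed; the placeholder shapes and the class count are illustrative), showing how the returned weights can rescale a per-example loss so that above-average-loss examples count 3.0x and the rest 0.5x:

import tensorflow as tf

num_classes = 4716  # illustrative, YouTube-8M style multi-label setup
labels_batch = tf.placeholder(tf.bool, [None, num_classes])
predictions = tf.placeholder(tf.float32, [None, num_classes])  # sigmoid outputs in (0, 1)

weights = get_weights_by_predictions(labels_batch, predictions)

# Reuse the same per-example cross-entropy formulation and weight it before averaging.
epsilon = 1e-6
float_labels = tf.cast(labels_batch, tf.float32)
per_example_ce = -tf.reduce_sum(
    float_labels * tf.log(predictions + epsilon) +
    (1.0 - float_labels) * tf.log(1.0 - predictions + epsilon), axis=1)
weighted_loss = tf.reduce_mean(weights * per_example_ce)
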
convert.py (project: vae-npvc, author: JeremyCCHsu)
def convert_f0(f0, src, trg):
    mu_s, std_s = np.fromfile(os.path.join('./etc', '{}.npf'.format(src)), np.float32)
    mu_t, std_t = np.fromfile(os.path.join('./etc', '{}.npf'.format(trg)), np.float32)
    lf0 = tf.where(f0 > 1., tf.log(f0), f0)
    lf0 = tf.where(lf0 > 1., (lf0 - mu_s)/std_s * std_t + mu_t, lf0)
    lf0 = tf.where(lf0 > 1., tf.exp(lf0), lf0)
    return lf0
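
A usage sketch (assumptions: TensorFlow 1.x, and that ./etc/<speaker>.npf files holding a log-F0 mean and standard deviation as two float32 values already exist; the speaker names and F0 values below are illustrative):

import numpy as np
import tensorflow as tf

f0 = tf.placeholder(tf.float32, [None])      # frame-level F0 contour, 0 for unvoiced frames
f0_converted = convert_f0(f0, 'SF1', 'TM3')  # source and target speaker names are placeholders

with tf.Session() as sess:
    contour = np.array([0., 0., 210.3, 215.8, 0., 198.4], dtype=np.float32)
    print(sess.run(f0_converted, feed_dict={f0: contour}))
# Unvoiced frames (f0 <= 1) pass through unchanged; voiced frames are shifted in the
# log-F0 domain from the source statistics to the target statistics.
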
layers.py (project: vae-npvc, author: JeremyCCHsu)
def selu(x):
    with tf.name_scope('selu'):
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
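
A short usage sketch (TensorFlow 1.x assumed; the layer sizes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
h = tf.layers.dense(x, 64)   # linear layer, no built-in activation
h = selu(h)                  # scaled ELU keeps activations roughly zero-mean, unit-variance
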
sparse.py (project: cxflow-tensorflow, author: Cognexa)
def dense_to_sparse(inputs: tf.Tensor, mask: Optional[tf.Tensor]=None) -> tf.SparseTensor:
    """
    Convert the given ``inputs`` tensor to a ``SparseTensor`` of its non-zero values.

    Optionally, use the given mask tensor for determining the values to be included in the ``SparseTensor``.

    :param inputs: input dense tensor
    :param mask: optional mask tensor
    :return: sparse tensor of non-zero (or masked) values
    """
    idx = tf.where(tf.not_equal((mask if mask is not None else inputs), 0))
    return tf.SparseTensor(idx, tf.gather_nd(inputs, idx), tf.shape(inputs, out_type=tf.int64))
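
A quick usage sketch (TensorFlow 1.x session execution; the small matrix is illustrative):

import tensorflow as tf

dense = tf.constant([[0., 3., 0.],
                     [1., 0., 2.]])
sparse = dense_to_sparse(dense)

with tf.Session() as sess:
    result = sess.run(sparse)
    print(result.indices)      # [[0 1] [1 0] [1 2]]
    print(result.values)       # [3. 1. 2.]
    print(result.dense_shape)  # [2 3]
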
hmc.py (project: a-nice-mc, author: ermongroup)
def hmc_updates(initial_pos, stepsize, avg_acceptance_rate, final_pos, accept,
                target_acceptance_rate, stepsize_inc, stepsize_dec,
                stepsize_min, stepsize_max, avg_acceptance_slowness):
    new_pos = tf.where(accept, final_pos, initial_pos)
    new_stepsize_ = tf.multiply(
        stepsize,
        tf.where(tf.greater(avg_acceptance_rate, target_acceptance_rate), stepsize_inc, stepsize_dec)
    )
    new_stepsize = tf.maximum(tf.minimum(new_stepsize_, stepsize_max), stepsize_min)
    new_acceptance_rate = tf.add(
        avg_acceptance_slowness * avg_acceptance_rate,
        (1.0 - avg_acceptance_slowness) * tf.reduce_mean(tf.to_float(accept))
    )
    return new_pos, new_stepsize, new_acceptance_rate
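
A toy invocation sketch (illustrative shapes and hyper-parameters; in the real sampler the accept mask comes from a Metropolis-Hastings test on the proposed positions):

import tensorflow as tf

initial_pos = tf.zeros([8, 2])   # 8 chains in a 2-D state space (illustrative)
final_pos = tf.ones([8, 2])      # proposed positions after leapfrog integration
accept = tf.constant([True, False, True, True, False, True, False, True])

new_pos, new_stepsize, new_rate = hmc_updates(
    initial_pos, stepsize=tf.constant(0.1), avg_acceptance_rate=tf.constant(0.9),
    final_pos=final_pos, accept=accept,
    target_acceptance_rate=0.65, stepsize_inc=1.02, stepsize_dec=0.98,
    stepsize_min=1e-4, stepsize_max=0.5, avg_acceptance_slowness=0.9)
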
statistics.py (project: a-nice-mc, author: ermongroup)
def __init__(self, energy_fn, prior, std=1.0,
                 inter_op_parallelism_threads=1, intra_op_parallelism_threads=1):
        self.energy_fn = energy_fn
        self.prior = prior
        self.z = self.energy_fn.z

        def fn(z, x):
            z_ = z + tf.random_normal(tf.shape(self.z), 0.0, std)
            accept = metropolis_hastings_accept(
                energy_prev=energy_fn(z),
                energy_next=energy_fn(z_)
            )
            return tf.where(accept, z_, z)

        self.steps = tf.placeholder(tf.int32, [])
        elems = tf.zeros([self.steps])
        self.z_ = tf.scan(
            fn, elems, self.z, back_prop=False
        )

        self.sess = tf.Session(
            config=tf.ConfigProto(
                inter_op_parallelism_threads=inter_op_parallelism_threads,
                intra_op_parallelism_threads=intra_op_parallelism_threads
            )
        )
        self.sess.run(tf.global_variables_initializer())
nice.py (project: a-nice-mc, author: ermongroup)
def __call__(self, inputs, steps, nice_steps=1):
        def nice_proposal(zv, x):
            """
            NICE proposal (without Metropolis-Hastings).
            `z` is the input state.
            `v` is created as a dummy variable to allow output of v_, for debugging purposes.
            :param zv:
            :param x:
            :return: next state `z_`, and the corresponding auxiliary variable `v_` (without MH).
            """
            z, v = zv
            z_, v_ = self.network([z, v], is_backward=(x < 0.5)) #(tf.random_uniform([]) < 0.5))
            return z_, v_

        def fn(zv, x):
            """
            Transition with Metropolis-Hastings.
            `z` is the input state.
            `v` is created as a dummy variable to allow output of v_, for debugging purposes.
            :param zv: [z, v], packed into a single argument because Python 3 removed tuple parameter unpacking.
            :param x: variable only for specifying the number of steps
            :return: next state `z_`, and the corresponding auxiliary variable `v_`.
            """
            z, v = zv
            v = tf.random_normal(shape=tf.stack([tf.shape(z)[0], self.network.v_dim]))
            # z_, v_ = self.network([z, v], is_backward=(tf.random_uniform([]) < 0.5))
            z_, v_ = tf.scan(nice_proposal, x * tf.random_uniform([]), (z, v), back_prop=False)
            z_, v_ = z_[-1], v_[-1]
            ep = hamiltonian(z, v, self.energy_fn)
            en = hamiltonian(z_, v_, self.energy_fn)
            accept = metropolis_hastings_accept(energy_prev=ep, energy_next=en)
            z_ = tf.where(accept, z_, z)
            return z_, v_

        elems = tf.ones([steps, nice_steps])
        return tf.scan(fn, elems, inputs, back_prop=False)
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_whole(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    print(np.shape(y_pred_decision))

    mask_true = K.sum(y_true[:, :, :, :,1:3], axis=4)
    mask_pred = K.sum(y_pred_decision[:, :, :, :, 1:3], axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
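
The tf.floor((y_pred + epsilon) / max) step turns the softmax output into a per-voxel one-hot decision (1 at the argmax class, 0 elsewhere, assuming a unique maximum). A tiny sanity-check sketch, with batch and spatial dimensions collapsed to size 1:

import numpy as np
import tensorflow as tf
from keras import backend as K

# One voxel with 5 class probabilities, shaped (batch, x, y, z, class) = (1, 1, 1, 1, 5).
y_pred = tf.constant(np.array([[[[[0.1, 0.6, 0.1, 0.1, 0.1]]]]], dtype=np.float32))
decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

with tf.Session() as sess:
    print(sess.run(decision))  # [[[[[0. 1. 0. 0. 0.]]]]] -- only the argmax class survives
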
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_core(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true1 = y_true[:, :, :, :, 3:]
    mask_true2 = y_true[:, :, :, :, 1:2]
    mask_true = K.sum(K.concatenate([mask_true1, mask_true2], axis=4), axis=4)
    mask_pred1 = y_pred_decision[:, :, :, :, 3:]
    mask_pred2 = y_pred_decision[:, :, :, :, 1:2]
    mask_pred = K.sum(K.concatenate([mask_pred1, mask_pred2], axis=4), axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_enhance(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())

# def accuracy_survival(y_true, y_predicted):
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_whole_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    # mask = K.expand_dims(K.sum(y_true,axis=4),axis=4)
    # cmp_mask = K.concatenate([K.ones_like(mask) - mask,K.zeros_like(mask), K.zeros_like(mask)],axis=4)
    # y_pred = y_pred + cmp_mask

    y_true = y_true[:,:,:,:,:3]
    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = K.sum(y_true, axis=4)
    mask_pred = K.sum(y_pred_decision, axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_core_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true = y_true[:,:,:,:,:3]


    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    y_pred_decision = tf.where(tf.is_nan(y_pred_decision), tf.zeros_like(y_pred_decision), y_pred_decision)


    mask_true1 = K.expand_dims(y_true[:, :, :, :, 2],axis=4)
    mask_true2 = K.expand_dims(y_true[:, :, :, :, 0],axis=4)
    mask_true = K.sum(K.concatenate([mask_true1, mask_true2], axis=4), axis=4)
    mask_pred1 = K.expand_dims(y_pred_decision[:, :, :, :, 2],axis=4)
    mask_pred2 = K.expand_dims(y_pred_decision[:, :, :, :, 0],axis=4)
    mask_pred = K.sum(K.concatenate([mask_pred1, mask_pred2], axis=4), axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_0(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :,0]
    mask_pred = y_pred_decision[:, :, :, :, 0]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_1(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :,1]
    mask_pred = y_pred_decision[:, :, :, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_1_2D(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon())/ K.max(y_pred, axis=2, keepdims=True))

    mask_true = y_true[:, :, 1]
    mask_pred = y_pred_decision[:, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_2(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_3(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_whole_mask(mask):
    def dice_whole_closure(y_true, y_pred):
        """
        Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                        TP
            Dice = 2 -------
                      T + P
        Parameters
        ----------
        y_true : keras.placeholder
            Placeholder that contains the ground truth labels of the classes
        y_pred : keras.placeholder
            Placeholder that contains the class prediction

        Returns
        -------
        scalar
            Dice metric
        """

        y_pred_decision = K.cast(y_pred / K.max(y_pred, axis=1, keepdims=True), 'int8')
        mask_true = K.sum(y_true[:, [1, 2, 3, 4], :, :, :], axis=1)
        mask_pred = K.sum(y_pred_decision[:, [1, 2, 3, 4], :, :, :], axis=1)

        y_sum = K.sum(mask * mask_true * mask_pred)

        return (2. * y_sum + K.epsilon()) / (K.sum(mask * mask_true) + K.sum(mask * mask_pred) + K.epsilon())

    return dice_whole_closure
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_core_mask(mask):
    def dice_core_closure(y_true, y_pred):
        """
        Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                        TP
            Dice = 2 -------
                      T + P
        Parameters
        ----------
        y_true : keras.placeholder
            Placeholder that contains the ground truth labels of the classes
        y_pred : keras.placeholder
            Placeholder that contains the class prediction

        Returns
        -------
        scalar
            Dice metric
        """

        y_pred_decision = K.cast(y_pred / K.max(y_pred, axis=1, keepdims=True), 'int8')
        mask_true = K.sum(y_true[:, [1, 3, 4], :, :, :], axis=1)
        mask_pred = K.sum(y_pred_decision[:, [1, 3, 4], :, :, :], axis=1)

        y_sum = K.sum(mask * mask_true * mask_pred)

        return (2. * y_sum + K.epsilon()) / (K.sum(mask * mask_true) + K.sum(mask * mask_pred) + K.epsilon())

    return dice_core_closure
metrics.py (project: segmentation_DLMI, author: imatge-upc)
def dice_enhance_mask(mask):
    def dice_enhance_closure(y_true, y_pred):
        """
        Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                        TP
            Dice = 2 -------
                      T + P
        Parameters
        ----------
        y_true : keras.placeholder
            Placeholder that contains the ground truth labels of the classes
        y_pred : keras.placeholder
            Placeholder that contains the class prediction

        Returns
        -------
        scalar
            Dice metric
        """

        y_pred_decision = K.cast(y_pred / K.max(y_pred, axis=1, keepdims=True), 'int8')
        mask_true = y_true[:, 4, :, :, :]
        mask_pred = y_pred_decision[:, 4, :, :, :]

        y_sum = K.sum(mask * mask_true * mask_pred)

        return (2. * y_sum + K.epsilon()) / (K.sum(mask * mask_true) + K.sum(mask * mask_pred) + K.epsilon())

    return dice_enhance_closure

