Python minimum() examples from open-source projects

Source: objectives.py (project: face_detection, author: chintak)
def iou_loss(p, t):
    # IoU-based loss between predicted and target boxes; each row of p and t
    # encodes a box as (x1, y1, x2, y2), reshaped below into two corner points.
    # Assumes the usual aliases: import numpy as np; import theano.tensor as T
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps_t0 = T.maximum(tp[:, 0, :], tt[:, 0, :])  # top-left corner of the overlap
    overlaps_t1 = T.minimum(tp[:, 1, :], tt[:, 1, :])  # bottom-right corner of the overlap
    intersection = overlaps_t1 - overlaps_t0
    bool_overlap = T.min(intersection, axis=1) > 0  # computed but never used below
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = T.maximum(intersection, np.float32(0.))
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    # loss = 1 - IoU, with the ratio computed in log space and capped at 1
    loss = 1. - T.minimum(
        T.exp(T.log(T.abs_(intersection)) -
              T.log(T.abs_(union) + np.float32(1e-5))),
        np.float32(1.)
    )
    return T.mean(loss)
Source: objectives.py (project: face_detection, author: chintak)
def iou_loss_val(p, t):
    # NumPy counterpart of iou_loss, for checking values on concrete arrays
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps = np.zeros_like(tp, dtype=np.float32)
    overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
    intersection = overlaps[:, 1, :] - overlaps[:, 0, :]
    bool_overlap = np.min(intersection, axis=1) > 0
    # caveat: the clipping below happens after the product, so two negative
    # extents (fully disjoint boxes) multiply into a spurious positive area;
    # bool_overlap was presumably meant to mask that case but is never used
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = np.maximum(intersection, 0.)
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    loss = 1. - np.minimum(
        np.exp(np.log(np.abs(intersection)) - np.log(np.abs(union) + 1e-5)),
        1.
    )
    return np.mean(loss)
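A quick sanity check of the NumPy version (a minimal sketch; the per-row box layout (x1, y1, x2, y2) is implied by the reshape above):

import numpy as np

p = np.array([[0., 0., 2., 2.]], dtype=np.float32)  # predicted box
t = np.array([[0., 0., 2., 2.]], dtype=np.float32)  # identical target box
print(iou_loss_val(p, t))  # ~0.0 (IoU = 1)

t = np.array([[1., 1., 3., 3.]], dtype=np.float32)  # partial overlap, IoU = 1/7
print(iou_loss_val(p, t))  # ~0.857 (1 - 1/7)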
Source: theano_backend.py (project: keras, author: GeekLiB)
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    # one step of the CTC forward (alpha) recursion in log space: grow the
    # active window of states, then accumulate stay, advance, and skip transitions
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
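For intuition, here is a NumPy transcription of the same step (a sketch: it assumes skip_idxs, log_p_curr, and log_p_prev are 1-D arrays and active is a Python int, and it allocates a fresh buffer in place of the zeros argument):

import numpy as np

def ctc_update_log_p_np(skip_idxs, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[skip_idxs < active]
    active_next = int(min(
        max(active + 1,
            (active_skip_idxs.max() if active_skip_idxs.size else -1) + 2 + 1),
        log_p_curr.shape[0]))
    # subtract the max before exponentiating, for numerical stability
    common_factor = log_p_prev[:active].max()
    p_prev = np.exp(log_p_prev[:active] - common_factor)
    _p_prev = np.zeros(active_next)
    _p_prev[:active] = p_prev                                  # stay in the same state
    _p_prev[1:] += _p_prev[:-1].copy()                         # advance by one state
    _p_prev[active_skip_idxs + 2] += p_prev[active_skip_idxs]  # skip over a blank
    with np.errstate(divide='ignore'):  # log(0) -> -inf, matching the symbolic graph
        log_p_next = log_p_curr[:active_next] + np.log(_p_prev) + common_factor
    return active_next, log_p_next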
(The ctc_update_log_p above also appears verbatim in theano_backend.py of four other Keras forks: deep-learning-keras-projects by jasmeetsb, keras-customized by ambrite, keras by NVIDIA, and keras_superpixel_pooling by parag2489.)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_inequality_with_self(self):
        x = T.scalar('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')

        f = theano.function([x], T.lt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.le(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.gt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.minimum(x, x), mode=mode)
        self.assert_identity(f)

        f = theano.function([x], T.maximum(x, x), mode=mode)
        self.assert_identity(f)
Source: mytheano_utils.py (project: Precise-CTC, author: Michlong)
def editdist_np(source, target):
    # Levenshtein edit distance between two NumPy integer arrays,
    # computed one row of the DP table at a time
    if len(source) < len(target):
        return editdist_np(target, source)
    if len(target) == 0:
        return len(source)

    previous_row = np.arange(target.size + 1)
    for s in source:
        current_row = previous_row + 1
        # substitution (free when target == s)
        current_row[1:] = np.minimum(current_row[1:], np.add(previous_row[:-1], target != s))
        # insertion
        current_row[1:] = np.minimum(current_row[1:], current_row[0:-1] + 1)
        previous_row = current_row

    return previous_row[-1]

# Adapted from the pure-Python version (the 6th variant) at
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
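A quick usage check (a minimal sketch; the strings are encoded as integer arrays because the function compares NumPy array elements):

import numpy as np

source = np.array([ord(c) for c in "kitten"])
target = np.array([ord(c) for c in "sitting"])
print(editdist_np(source, target))  # -> 3 (substitute k->s, e->i, insert g)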
Source: ctc_theano.py (project: Precise-CTC, author: Michlong)
def _editdist(s, t):
        """
        Levenshtein's edit distance function
        :param s: vector, source string
        :param t: vector, target string
        :return:  edit distance, scalar
        """
        def update(x, previous_row):
            current_row = previous_row + 1
            current_row = tensor.set_subtensor(
                current_row[1:],
                tensor.minimum(current_row[1:],
                               tensor.add(previous_row[:-1], tensor.neq(target, x))))
            current_row = tensor.set_subtensor(
                current_row[1:],
                tensor.minimum(current_row[1:], current_row[0:-1] + 1))
            return current_row
        source, target = ifelse(tensor.lt(s.shape[0], t.shape[0]), (t, s), (s, t))
        previous_row = tensor.arange(target.size + 1, dtype=theano.config.floatX)
        result, updates = theano.scan(fn=update, sequences=source,
                                      outputs_info=previous_row, name='editdist')
        return result[-1, -1]
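A minimal compilation sketch (it assumes a working Theano install and treats _editdist as a standalone function; in its source file it sits inside a class):

import numpy as np
import theano
import theano.tensor as tensor
from theano.ifelse import ifelse

s = tensor.ivector('s')
t = tensor.ivector('t')
editdist = theano.function([s, t], _editdist(s, t))
print(editdist(np.array([1, 2, 3], dtype=np.int32),
               np.array([1, 3], dtype=np.int32)))  # -> 1.0 (delete the 2)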
Source: pdnnet.py (project: pdnn, author: petered)
def past_weight_grad_calculator(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):
    """
    Do an efficient update of the weights given the two spike-trains.

    This isn't actually implemented as an efficient update, but it produces exactly the result an efficient update would.

    :param xs: An (n_samples, n_in) array
    :param es: An (n_samples, n_out) array
    :param kp_x: kp for the x units
    :param kd_x: kd for the x units
    :param kp_e: kp for the e units
    :param kd_e: kd for the e units
    :param shapes: (minibatch_size, n_in, n_out)
    :return: An (n_in, n_out) approximate weight gradient.
    """
    # TODO: Make this actually use sparsity, one of these days.
    kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]
    n_samples, n_in, n_out = shapes
    rx = kd_x/(kp_x+kd_x)
    re = kd_e/(kp_e+kd_e)

    tx_last = create_shared_variable(np.zeros((n_samples, n_in))+1)
    te_last = create_shared_variable(np.zeros((n_samples, n_out))+1)
    x_last = create_shared_variable(np.zeros((n_samples, n_in)))
    e_last = create_shared_variable(np.zeros((n_samples, n_out)))

    t_last = tt.minimum(tx_last[:, :, None], te_last[:, None, :])
    x_spikes = tt.neq(xs, 0)
    dw_potentials = x_last[:, :, None] * e_last[:, None, :] * \
            rx**(tx_last[:, :, None]-t_last) \
            * re**(te_last[:, None, :]-t_last) \
            * geoseries_sum(rx*re, t_end=t_last, t_start=1)
    e_spikes = tt.neq(es, 0)
    dws = (x_spikes[:, :, None]+e_spikes[:, None, :]-x_spikes[:, :, None]*e_spikes[:, None, :])*dw_potentials  # (n_samples, n_in, n_out)

    add_update(x_last, tt.switch(x_spikes, x_last*rx**tx_last + xs/as_floatx(kd_x), x_last))
    add_update(e_last, tt.switch(e_spikes, e_last*re**te_last + es/as_floatx(kd_e), e_last))  # e traces decay with re, mirroring rx for the x traces
    add_update(tx_last, tt.switch(x_spikes, 1, tx_last+1))
    add_update(te_last, tt.switch(e_spikes, 1, te_last+1))
    return dws.sum(axis=0)
Source: models.py (project: punctuator2, author: ottokart)
def PReLU(a, x):
    # parametric ReLU: identity for x > 0, learned slope a for x < 0
    return T.maximum(0.0, x) + a * T.minimum(0.0, x)
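A one-line NumPy check of the same formula (a sketch; a is a plain scalar standing in for the learned parameter):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
a = 0.25
print(np.maximum(0.0, x) + a * np.minimum(0.0, x))  # -> [-0.5 -0.125 0. 1.5]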
Source: theano_backend.py (project: keras, author: GeekLiB)
def minimum(x, y):
    return T.minimum(x, y)
Source: theano_backend.py (project: keras, author: GeekLiB)
def relu(x, alpha=0., max_value=None):
    _assert_has_capability(T.nnet, 'relu')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
Source: theano_backend.py (project: keraflow, author: ipod825)
def minimum(self, x, y):
        return T.minimum(x, y)
Source: theano_backend.py (project: keraflow, author: ipod825)
def relu(self, x, alpha=0., max_value=None):
        x = T.nnet.relu(x, alpha)
        if max_value is not None:
            x = T.minimum(x, max_value)
        return x
(The minimum and relu wrappers above, from keras by GeekLiB, also appear verbatim in theano_backend.py of the forks deep-learning-keras-projects by jasmeetsb and keras-customized by ambrite.)
Source: convvae.py (project: deep-learning-models, author: kuleshov)
def create_objectives(self, deterministic=False):
    """ELBO objective with the analytic expectation trick"""
    # load network input
    X = self.inputs[0]

    # load network output
    if self.model == 'bernoulli':
      q_mu, q_logsigma, sample, _ \
          = lasagne.layers.get_output(self.network[2:], deterministic=deterministic)
    elif self.model in ('gaussian', 'svhn'):
      p_mu, p_logsigma, q_mu, q_logsigma, _, _ \
          = lasagne.layers.get_output(self.network, deterministic=deterministic)

    # first term of the ELBO: the KL divergence in closed form (computed as its
    # negative, so it adds directly to the ELBO; clamping q_logsigma at 50
    # keeps the exp from overflowing)
    kl_div = 0.5 * T.sum(1 + 2*q_logsigma - T.sqr(q_mu)
                         - T.exp(2 * T.minimum(q_logsigma, 50)), axis=1).mean()

    # second term: log-likelihood of the data under the model
    if self.model == 'bernoulli':
      logpxz = -lasagne.objectives.binary_crossentropy(sample, X.flatten(2)).sum(axis=1).mean()
    elif self.model in ('gaussian', 'svhn'):
      # def log_lik(x, mu, log_sig):
      #     return T.sum(-(np.float32(0.5 * np.log(2 * np.pi)) + log_sig)
      #                   - 0.5 * T.sqr(x - mu) / T.exp(2 * log_sig), axis=1)
      # logpxz = log_lik(X.flatten(2), p_mu, p_logsigma).mean()
      logpxz = log_normal2(X.flatten(2), p_mu, p_logsigma).sum(axis=1).mean()

    loss = -1 * (logpxz + kl_div)

    # we don't use the separate accuracy metric right now
    return loss, -kl_div
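For reference, a NumPy mirror of the kl_div term above (a sketch; note the term is the negative KL divergence KL(q(z|x) || N(0, I)), which is why it enters with a plus sign inside the negated loss):

import numpy as np

def neg_kl_term(q_mu, q_logsigma):
    # -KL( N(mu, diag(sigma^2)) || N(0, I) ), summed over latent dimensions
    # and averaged over the batch; the clamp mirrors the overflow guard above
    return 0.5 * np.sum(1 + 2 * q_logsigma - q_mu ** 2
                        - np.exp(2 * np.minimum(q_logsigma, 50)), axis=1).mean()

print(neg_kl_term(np.zeros((4, 8)), np.zeros((4, 8))))  # q equal to the prior -> 0.0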
Source: activations.py (project: drmad, author: bigaidream-projects)
def leaky_relu(x):
    # leaky_slope is a module-level constant defined elsewhere in the file
    output = T.maximum(0., x) + leaky_slope*T.minimum(0., x)
    return output
Source: similarity_encoder.py (project: simec, author: cod3licious)
def exp_cutoff(x):
    # capped at 1, with a heavy lower tail:
    # values in roughly the upper 80% of the range saturate at 1,
    b = 0.8
    c = x.min() + b*(x.max()-x.min())
    # while values in the lower 20% of the range fall below y
    y = 0.15
    a = y/T.exp((0.2-b)*(x.max()-x.min()))
    return T.minimum(a * T.exp(x - c), 1.)
Source: theano_backend.py (project: deeplift, author: kundajelab)
def minimum(x, y):
    return T.minimum(x, y)
Source: covariance_functions.py (project: bbho, author: DarkElement75)
def __init__(self, lengthscale, v):
        covariance_function.__init__(self, lengthscale, v)
        # min kernel k(x1, x2) = min(x1, x2); x1 and x2 are symbolic
        # variables defined elsewhere in the file
        self.f = theano.function([x1, x2], T.minimum(x1, x2), allow_input_downcast=True)
Source: basic.py (project: CNNbasedMedicalSegmentation, author: BRML)
def prelu(inpt, a):
    '''
    Parametric rectified linear unit, see: https://arxiv.org/pdf/1502.01852.pdf
    '''
    return T.maximum(inpt, 0) + a * T.minimum(inpt, 0)
Source: theano_backend.py (project: reading-text-in-the-wild, author: mathDR)
def minimum(x, y):
    return T.minimum(x, y)


Source: theano_backend.py (project: reading-text-in-the-wild, author: mathDR)
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
Source: activation.py (project: corelm, author: nusnlp)
def get_function(self, func_name):
        if func_name == 'tanh':
            return T.tanh
        elif func_name == 'hardtanh':
            L.warning('Current hardTanh implementation is slow!')
            return lambda x: ((abs(x) <= 1) * x) + ((1 < abs(x)) * T.sgn(x))
        elif func_name == 'xtanh':
            return lambda x: T.tanh(x) + 0.1 * x
        elif func_name == 'sigmoid':
            return T.nnet.sigmoid
        elif func_name == 'fastsigmoid':
            L.error('T.nnet.ultra_fast_sigmoid function has some problems')
        elif func_name == 'hardsigmoid':
            return T.nnet.hard_sigmoid
        elif func_name == 'xsigmoid':
            return lambda x: T.nnet.sigmoid(x) + 0.1 * x
        elif func_name == 'softplus':
            return T.nnet.softplus
        elif func_name == 'relu':
            #return lambda x: T.maximum(x, 0)
            return lambda x: x * (x > 0)
            #return T.nnet.relu # Update theano and then use this one instead
        elif func_name == 'leakyrelu':
            return lambda x: T.maximum(x, 0.01 * x)
        elif func_name == 'cappedrelu':
            return lambda x: T.minimum(x * (x > 0), 6)
        elif func_name == 'softmax':
            return T.nnet.softmax
        elif func_name == 'norm1':
            return lambda x: x / T.nlinalg.norm(x, 1)
        elif func_name == 'norm2':
            #return lambda x: x / T.nlinalg.norm(x, 2)
            return lambda x: x / T.dot(x, x)**0.5
        else:
            L.error('Invalid function name given: ' + func_name)
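One branch can be exercised directly; a small hypothetical sketch (assuming a working Theano install) that compiles the 'cappedrelu' lambda from above:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
capped = theano.function([x], T.minimum(x * (x > 0), 6))  # the 'cappedrelu' branch
print(capped(np.array([-3., 2., 9.], dtype=theano.config.floatX)))  # -> [0. 2. 6.]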
Source: nce.py (project: corelm, author: nusnlp)
def logadd(self, a, b):
        # numerically stable log(exp(a) + exp(b)) via the log-sum-exp trick
        g = T.maximum(a, b)
        l = T.minimum(a, b)
        return g + T.log(1 + T.exp(l - g))
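A quick NumPy check of the same trick (np.logaddexp computes the identical quantity; the naive form underflows for large negative inputs):

import numpy as np

a, b = -1000.0, -1001.0
g, l = max(a, b), min(a, b)
print(g + np.log(1 + np.exp(l - g)))  # -> -999.6867...
print(np.logaddexp(a, b))             # same value; np.log(np.exp(a) + np.exp(b)) underflows to -inf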

