Python minimum() usage examples (source code)

approximators.py (project: dqn_vizdoom_theano, author: mihahauke)
def build_loss_expression(predicted, target):
    # Huber loss with delta = 1: quadratic near zero, linear for |error| > 1.
    abs_err = abs(predicted - target)
    quadratic_part = tensor.minimum(abs_err, 1)
    linear_part = abs_err - quadratic_part
    loss = 0.5 * quadratic_part ** 2 + linear_part
    return loss
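
For reference, a quick NumPy sketch (not part of the original project) confirming that this expression equals the piecewise Huber definition with delta = 1:

import numpy as np

predicted = np.array([0.2, 1.5, -3.0])
target = np.array([0.0, 0.0, 0.0])
abs_err = np.abs(predicted - target)
quad = np.minimum(abs_err, 1.0)
lin = abs_err - quad
loss = 0.5 * quad ** 2 + lin
# Piecewise Huber: 0.5 * e**2 where |e| <= 1, else |e| - 0.5.
expected = np.where(abs_err <= 1.0, 0.5 * abs_err ** 2, abs_err - 0.5)
assert np.allclose(loss, expected)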
queue_base.py (project: lstmprovisor-python, author: Impro-Visor)
def queue_transform(feature_strengths, feature_vects, return_strengths=False):
    """
    Process features according to a "fragmented queue", where each timestep
    gets a size-1 window onto a feature queue. Effectively,
        feature_strengths gives how much to push onto queue
        feature_vects gives what to push on
        pop weights are tied to feature_strengths
        output is a size-1 peek (without popping)

    Parameters:
        - feature_strengths: float32 tensor of shape (batch, push_timestep) in [0,1]
        - feature_vects: float32 tensor of shape (batch, push_timestep, feature_dim)

    Returns:
        - peek_vects: float32 tensor of shape (batch, timestep, feature_dim)
    """
    n_batch, n_time, n_feature = feature_vects.shape

    cum_sum_str = T.extra_ops.cumsum(feature_strengths, 1)

    # We will be working in (batch, timestep, push_timestep).
    # For each timestep, if we subtract out the sum of pushes before that
    # timestep and then cap to [0, 1], we get the cumsums for just the
    # features active in that timestep.
    timestep_adjustments = T.shape_padright(cum_sum_str - feature_strengths)
    push_time_cumsum = T.shape_padaxis(cum_sum_str, 1)
    relative_cumsum = push_time_cumsum - timestep_adjustments
    capped_cumsum = T.minimum(T.maximum(relative_cumsum, 0), 1)

    # Now we can recover the peek strengths by taking a diff.
    shifted = T.concatenate([T.zeros((n_batch, n_time, 1)), capped_cumsum[:, :, :-1]], 2)
    peek_strengths = capped_cumsum - shifted
    # peek_strengths is now (batch, timestep, push_timestep)

    result = T.batched_dot(peek_strengths, feature_vects)

    if return_strengths:
        return peek_strengths, result
    else:
        return result
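
A minimal usage sketch (the variable names and shapes below are illustrative; it assumes the module's usual import theano; import theano.tensor as T):

import numpy as np
import theano
import theano.tensor as T

strengths = T.matrix('strengths')   # (batch, push_timestep), values in [0, 1]
vects = T.tensor3('vects')          # (batch, push_timestep, feature_dim)
peek = theano.function([strengths, vects], queue_transform(strengths, vects))

s = np.random.uniform(0, 1, (2, 5)).astype(theano.config.floatX)
v = np.random.randn(2, 5, 3).astype(theano.config.floatX)
print(peek(s, v).shape)             # (2, 5, 3)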
theano_backend.py (project: keras, author: NVIDIA)
def minimum(x, y):
    return T.minimum(x, y)
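
The wrapper simply delegates to T.minimum, which broadcasts like NumPy's np.minimum; a quick sketch:

import theano
import theano.tensor as T

a = T.vector('a')
b = T.scalar('b')
f = theano.function([a, b], T.minimum(a, b), allow_input_downcast=True)
print(f([1.0, 5.0, -2.0], 2.0))  # [ 1.  2. -2.]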
theano_backend.py (project: keras, author: NVIDIA)
def relu(x, alpha=0., max_value=None):
    _assert_has_capability(T.nnet, 'relu')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
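
With max_value set, this becomes a clipped ReLU (for example, max_value=6. gives a ReLU6-style activation). The same pattern in raw Theano, as a sketch:

import theano
import theano.tensor as T

x = T.vector('x')
relu6 = theano.function([x], T.minimum(T.nnet.relu(x), 6.0),
                        allow_input_downcast=True)
print(relu6([-1.0, 3.0, 10.0]))  # [0. 3. 6.]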
base.py (project: seq2graph, author: masterkeywikz)
def _differentiate(self, params=None):
    '''Return a sequence of gradients for our parameters.

    If this optimizer has been configured with a gradient norm limit, or
    with elementwise gradient clipping, this method applies the appropriate
    rescaling and clipping operations before returning the gradient.

    Parameters
    ----------
    params : list of Theano variables, optional
        Return the gradient with respect to these parameters. Defaults to
        all parameters that the optimizer knows about.

    Yields
    ------
    pairs : (param, grad) tuples
        Generates a sequence of tuples representing each of the parameters
        requested and the corresponding Theano gradient expressions.
    '''
    if params is None:
        params = self._params
    for param, grad in zip(params, TT.grad(self._loss, params)):
        if self.max_gradient_elem > 0:
            limit = util.as_float(self.max_gradient_elem)
            yield param, TT.clip(grad, -limit, limit)
        elif self.max_gradient_norm > 0:
            norm = TT.sqrt((grad * grad).sum())
            limit = util.as_float(self.max_gradient_norm)
            yield param, grad * TT.minimum(1, limit / norm)
        else:
            yield param, grad
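
The norm-clipping branch rescales the whole gradient so its L2 norm never exceeds max_gradient_norm, leaving smaller gradients untouched. A NumPy sketch with illustrative values:

import numpy as np

grad = np.array([3.0, 4.0])     # L2 norm is 5
limit = 1.0
clipped = grad * min(1.0, limit / np.linalg.norm(grad))
print(np.linalg.norm(clipped))  # 1.0; the direction is preserved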
adaptive.py (project: seq2graph, author: masterkeywikz)
def _get_updates_for(self, param, grad):
    # Rprop-style update: grow the per-parameter step while successive
    # gradients agree in sign, shrink it when the sign flips.
    grad_tm1 = shared_like(param, 'grad')
    step_tm1 = shared_like(param, 'step', self.learning_rate.eval())
    test = grad * grad_tm1
    diff = TT.lt(test, 0)
    steps = step_tm1 * (TT.eq(test, 0) +
                        TT.gt(test, 0) * self.step_increase +
                        diff * self.step_decrease)
    step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
    grad = grad - diff * grad  # zero out the gradient after a sign flip
    yield param, param - TT.sgn(grad) * step
    yield grad_tm1, grad
    yield step_tm1, step
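
In words: each parameter keeps its own step size, grown by step_increase while successive gradients agree in sign and shrunk by step_decrease after a sign flip, with T.minimum/T.maximum clamping it to [min_step, max_step]. A scalar NumPy sketch of one such step (the constants are illustrative, not the library's defaults):

import numpy as np

step_increase, step_decrease = 1.2, 0.5   # illustrative constants
min_step, max_step = 1e-6, 1.0

grad, grad_tm1, step_tm1 = 0.3, -0.2, 0.01
test = grad * grad_tm1
if test > 0:                    # signs agree: grow the step
    step = step_tm1 * step_increase
elif test < 0:                  # sign flip: shrink the step, skip the update
    step = step_tm1 * step_decrease
    grad = 0.0
else:
    step = step_tm1
step = min(max_step, max(min_step, step))
delta = -np.sign(grad) * step   # parameter change; zero here since grad was zeroed
print(step, delta)              # step 0.005, delta -0.0 (no move after the flip)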
theano_backend.py (project: keras_superpixel_pooling, author: parag2489)
def minimum(x, y):
    return T.minimum(x, y)
theano_backend.py (project: keras_superpixel_pooling, author: parag2489)
def relu(x, alpha=0., max_value=None):
    _assert_has_capability(T.nnet, 'relu')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
basic.py (project: Theano-Deep-learning, author: GeekLiB)
def infer_shape(self, nodes, shapes):
    return [(tensor.minimum(*shapes[0]), )]
basic.py (project: Theano-Deep-learning, author: GeekLiB)
def structured_minimum(x, y):
    """
    Structured elemwise minimum of sparse matrix x by scalar y.

    """
    # see decorator for function body
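
A hedged usage sketch (assuming the theano.sparse API; a structured op touches only the stored nonzero elements, so explicit zeros stay zero):

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse
import theano.tensor as T

x = theano.sparse.csr_matrix('x')
y = T.scalar('y')
f = theano.function([x, y], theano.sparse.structured_minimum(x, y),
                    allow_input_downcast=True)

m = sp.csr_matrix(np.array([[0.0, 2.0], [5.0, 0.0]],
                           dtype=theano.config.floatX))
print(f(m, 3.0).toarray())  # [[0. 2.] [3. 0.]]; the zeros are untouched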
test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_shape_inequality_with_self(self):
    x = T.vector('x', dtype=config.floatX)
    mode = theano.compile.get_default_mode().including(
        'local_useless_elemwise_comparison',
        'local_shape_to_shape_i',
        'local_track_shape_i',
        'local_subtensor_make_vector')
    f = theano.function([x], T.lt(x.shape[0], 0), mode=mode)
    self.assert_eqs_const(f, 0)

    f = theano.function([x], T.ge(x.shape[0], 0), mode=mode)
    self.assert_eqs_const(f, 1)

    f = theano.function([x], T.maximum(x.shape[0], 0), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, Shape_i), topo[0].op
    x_val = numpy.ones(100, dtype=config.floatX)
    assert f(x_val) == x_val.shape[0]

    f = theano.function([x], T.maximum(0, x.shape[0]), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, Shape_i), topo[0].op
    x_val = numpy.ones(100, dtype=config.floatX)
    assert f(x_val) == x_val.shape[0]

    f = theano.function([x], T.minimum(x.shape[0], 0), mode=mode)
    self.assert_eqs_const(f, 0)
    assert f(x_val) == 0

    f = theano.function([x], T.minimum(0, x.shape[0]), mode=mode)
    self.assert_eqs_const(f, 0)
    assert f(x_val) == 0

    f = theano.function([x], T.minimum([0, 0], x.shape[0]), mode=mode)
    # This case isn't optimized.
    # self.assert_eqs_const(f, 0)
    utt.assert_allclose(f(x_val), [0, 0])
test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_elemwise(self):
    # float Ops
    mats = theano.tensor.matrices('cabxy')
    c, a, b, x, y = mats
    s1 = T.switch(c, a, b)
    s2 = T.switch(c, x, y)
    for op in (T.add, T.sub, T.mul, T.true_div, T.int_div, T.floor_div,
               T.minimum, T.maximum, T.gt, T.lt, T.ge, T.le, T.eq, T.neq,
               T.pow):
        g = optimize(FunctionGraph(mats, [op(s1, s2)]))
        assert str(g).count('Switch') == 1
    # integer Ops
    mats = theano.tensor.imatrices('cabxy')
    c, a, b, x, y = mats
    s1 = T.switch(c, a, b)
    s2 = T.switch(c, x, y)
    for op in (T.and_, T.or_, T.xor,
               T.bitwise_and, T.bitwise_or, T.bitwise_xor):
        g = optimize(FunctionGraph(mats, [op(s1, s2)]))
        assert str(g).count('Switch') == 1
    # add/mul with more than two inputs
    u, v = theano.tensor.matrices('uv')
    s3 = T.switch(c, u, v)
    for op in (T.add, T.mul):
        g = optimize(FunctionGraph(mats + [u, v], [op(s1, s2, s3)]))
        assert str(g).count('Switch') == 1
theano_backend.py (project: Hat, author: qiuqiangkong)
def relu(x, alpha, max_value):
    y = T.nnet.relu(x, alpha)
    if max_value is not None:
        y = T.minimum(y, max_value)
    return y

### objectives
# Cross-entropy loss. y_pred and y_gt should be 2D; do NOT use tensor.nnet.categorical_crossentropy.
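
A minimal sketch of such a manual 2D cross-entropy (an illustration, not necessarily the exact Hat implementation; _EPSILON is a hypothetical clipping constant that guards against log(0)):

import theano.tensor as T

_EPSILON = 1e-7  # hypothetical constant, guards against log(0)

def categorical_crossentropy(y_pred, y_gt):
    # y_pred, y_gt: 2D tensors of shape (n_samples, n_classes)
    y_pred = T.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    return T.mean(-T.sum(y_gt * T.log(y_pred), axis=-1))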
backend_theano.py (project: statestream, author: VolkerFischer)
def minimum(x, y):
    return T.minimum(x, y)
example_support.py (project: PyGame-Learning-Environment, author: ntasfi)
def q_loss(self, y_true, y_pred):
    # Huber loss with clip_delta = 1.0, summed over all elements.
    diff = y_true - y_pred
    _quad = T.minimum(abs(diff), 1.0)
    _lin = abs(diff) - _quad
    loss = 0.5 * _quad ** 2 + _lin
    loss = T.sum(loss)

    return loss
theano_backend.py (project: InnerOuterRNN, author: Chemoinformatics)
def minimum(x, y):
    return T.minimum(x, y)
theano_backend.py (project: InnerOuterRNN, author: Chemoinformatics)
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
mytheano_utils.py (project: Precise-CTC, author: Michlong)
def editdist(s, t):
    # Levenshtein edit distance between two symbol sequences, computed
    # row by row of the DP table with theano.scan.
    def update(x, previous_row):
        current_row = previous_row + 1
        current_row = tensor.set_subtensor(
            current_row[1:],
            tensor.minimum(current_row[1:],
                           tensor.add(previous_row[:-1], tensor.neq(target, x))))
        current_row = tensor.set_subtensor(
            current_row[1:],
            tensor.minimum(current_row[1:], current_row[0:-1] + 1))
        return current_row
    source, target = ifelse(tensor.lt(s.shape[0], t.shape[0]), (t, s), (s, t))
    previous_row = tensor.arange(target.size + 1, dtype=theano.config.floatX)
    result, updates = theano.scan(fn=update, sequences=source,
                                  outputs_info=previous_row, name='editdist')
    return result[-1, -1]

# numpy version
# from [https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python, the 5th version]
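
The NumPy version itself is not included in this excerpt; a standard two-row formulation, reconstructed as a sketch (it may differ in detail from the cited wikibooks code), looks like this:

import numpy as np

def levenshtein(source, target):
    # Iterative Levenshtein distance using two NumPy rows of the DP table.
    if len(source) < len(target):
        return levenshtein(target, source)
    if len(target) == 0:
        return len(source)
    source = np.array(tuple(source))
    target = np.array(tuple(target))
    previous_row = np.arange(target.size + 1)
    for s in source:
        current_row = previous_row + 1
        current_row[1:] = np.minimum(current_row[1:],
                                     previous_row[:-1] + (target != s))
        current_row[1:] = np.minimum(current_row[1:], current_row[:-1] + 1)
        previous_row = current_row
    return previous_row[-1]

print(levenshtein("kitten", "sitting"))  # 3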
theano_backend.py (project: odin_old, author: trungnt13)
def minimum(x, y):
    return T.minimum(x, y)

# ===========================================================================
# SHAPE OPERATIONS
# ===========================================================================
theano_backend.py (project: odin_old, author: trungnt13)
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x

