Python scalar() usage examples (source code)
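The snippets below, collected from open-source projects, show typical uses of Theano's scalar constructors: theano.tensor.scalar() builds a symbolic 0-d tensor variable, while theano.scalar.Scalar(dtype) is the lower-level scalar type used inside elemwise graphs. As a quick orientation, here is a minimal, self-contained sketch (it assumes only a default Theano install; the names x, y, f are illustrative):

import theano
import theano.tensor as T

x = T.scalar('x')                       # symbolic 0-d variable (dtype floatX)
y = T.scalar('y')
f = theano.function([x, y], x * y + 1)  # compile the graph into a callable
print(f(2.0, 3.0))                      # prints 7.0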

Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_kording_bug(self):
        x, y = vectors('xy')
        eps = scalar('eps')
        s = scalar('s')

        #r = theano.tensor.mul(theano.tensor.fill(x, 2.*a), x/a , (y+z) , a)
        #r = theano.tensor.mul((x/a+y) , a, z)
        r = tensor.mul(s - 1,
                       eps + x / s,
                       eps + y / s,
                       s)

        f = function([s, eps, x, y], r ** 2)

        s_val = numpy.asarray(4, dtype=config.floatX)
        eps_val = numpy.asarray(1.e-6, dtype=config.floatX)
        x_val = numpy.asarray([1.5, 2], dtype=config.floatX)
        y_val = numpy.asarray([2.3, 3.1], dtype=config.floatX)

        r0 = f(s_val, eps_val, x_val, y_val)
        r1 = f(s_val, eps_val, x_val, y_val)
        r2 = f(s_val, eps_val, x_val, y_val)

        assert numpy.all(r0 == r1)
        assert numpy.all(r0 == r2)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test1(self):
        # basic test that the optimization work with scalar broadcasted
        x = tensor.matrix('x')
        y = tensor.scalar('y')
        z = tensor.matrix('z')
        f = function([x, y, z], tensor.exp(x + y + z)[0], mode=mode_opt)

        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(f, ops_to_check=[
                Subtensor, tensor.DimShuffle]))

        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.Subtensor)
        assert isinstance(prog[1].op, tensor.DimShuffle)
        assert isinstance(prog[2].op, tensor.Subtensor)
        assert isinstance(prog[3].op.scalar_op,
                          theano.scalar.Composite)  # Composite{add,add}
        assert len(prog) == 4
        f([[0, 1], [2, 3]], 4, [[4, 5], [6, 7]])  # let debugmode test something
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test5(self):
        # test that we don't lift when we reuse the output of the
        # elemwise for other computation.
        x = tensor.matrix('x')
        y = tensor.vector('y')
        f = function([x, y], [tensor.exp(x + y)[0], tensor.exp(x + y) + x],
                     mode=mode_opt)

        # Opt doesn't apply, so no need for check_stack_trace
        # self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))

        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.DimShuffle)
        assert isinstance(prog[1].op.scalar_op,
                          theano.scalar.Composite)  # Composite{add,exp}
        assert prog[2].op == tensor.add
        assert isinstance(prog[3].op, tensor.Subtensor)  # first subtensor
        assert len(prog) == 4
        f([[0, 1], [2, 3]], [4, 5])  # let debugmode test something
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test6(self):
        # basic test that the optimization works with a scalar as input,
        # and a scalar as output (no broadcasting of the scalar needed).
        # The optimization used to fail and display an ERROR message.

        x = tensor.vector('x')
        y = tensor.scalar('y')
        f = function([x, y], tensor.exp(x + y)[0], mode=mode_opt)

        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))

        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.Subtensor)
        # Composite{add,exp}
        assert isinstance(prog[1].op.scalar_op, theano.scalar.Composite)
        assert len(prog) == 2
        f([1, 2, 3], 4)  # let debugmode test something
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_inequality_with_self(self):
        x = T.scalar('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')

        f = theano.function([x], T.lt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.le(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.gt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.minimum(x, x), mode=mode)
        self.assert_identity(f)

        f = theano.function([x], T.maximum(x, x), mode=mode)
        self.assert_identity(f)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_and(self):
        mode = theano.compile.get_default_mode().including('canonicalize')

        x = T.scalar('x', dtype='int8')

        for zero, one in [(numpy.int8(0), numpy.int8(1)), (0, 1)]:
            f = theano.function([x], T.and_(x, zero), mode=mode)
            self.assert_eqs_const(f, 0)

            f = theano.function([x], T.and_(zero, x), mode=mode)
            self.assert_eqs_const(f, 0)

            f = theano.function([x], T.and_(x, one), mode=mode)
            if f.outputs[0].variable.dtype == x.dtype:
                self.assert_identity(f)

            f = theano.function([x], T.and_(one, x), mode=mode)
            if f.outputs[0].variable.dtype == x.dtype:
                self.assert_identity(f)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_local_remove_useless_assert2(self):
        # remove assert conditions that are always true
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode)

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 1),
                            mode=mode)
        assert f(1, 1) == 1
        assert f(5, 1) == 5
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert len(topo[0].inputs) == 2
        assert topo[1].op == deep_copy_op
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_local_remove_useless_assert3(self):
        # don't remove assert conditions that are always false
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode)

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 0),
                            mode=mode)
        self.assertRaises(AssertionError, f, 1, 0)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert len(topo[0].inputs) == 3
        assert topo[1].op == deep_copy_op
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_eq(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x, y], T.eq(x, y), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx, vy)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.Elemwise)
        assert isinstance(topo[0].op.scalar_op, theano.scalar.EQ)
        f2 = theano.function([x], T.eq(x, x), mode=self.mode)
        assert numpy.all(f2(vx) == numpy.ones((5, 4)))
        topo2 = f2.maker.fgraph.toposort()
        # Shape_i{1}(<TensorType(float64, matrix)>), Shape_i{0}(<TensorType(float64, matrix)>), Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0)
        assert len(topo2) == 3
        assert isinstance(topo2[-1].op, T.Alloc)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_neq(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x, y], T.neq(x, y), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx, vy)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.Elemwise)
        assert isinstance(topo[0].op.scalar_op, theano.scalar.NEQ)
        f2 = theano.function([x], T.neq(x, x), mode=self.mode)
        assert numpy.all(f2(vx) == numpy.zeros((5, 4)))
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 3
        assert isinstance(topo2[-1].op, T.Alloc)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_mul(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x], T.mul(x), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == deep_copy_op
        f2 = theano.function([x, y], T.mul(x, y), mode=self.mode)
        assert numpy.all(f2(vx, vy) == vx * vy)
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 1
        assert isinstance(topo2[0].op, T.Elemwise)
        assert isinstance(topo2[0].op.scalar_op, theano.scalar.Mul)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_add(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x], T.add(x), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == deep_copy_op
        f2 = theano.function([x, y], T.add(x, y), mode=self.mode)
        assert numpy.all(f2(vx, vy) == vx + vy)
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 1
        assert isinstance(topo2[0].op, T.Elemwise)
        assert isinstance(topo2[0].op.scalar_op, theano.scalar.Add)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_broadcast2(self):
        # test switch(cst, vector, matrix)

        # This case is not optimized for now.
        x = theano.tensor.vector('x', dtype='int32')
        y = theano.tensor.matrix('y', dtype='int64')
        z = theano.tensor.switch(1, x, y)
        f = theano.function([x, y], z, mode=self.mode)
        assert len([node.op for node in f.maker.fgraph.toposort() if
                    isinstance(node.op, theano.tensor.Elemwise) and
                    not isinstance(node.op.scalar_op, theano.scalar.basic.Cast)]) == 0
        vx = numpy.array([4, 5, 6], dtype='int32')
        vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')
        assert numpy.all(f(vx, vy) == vx)

        z = theano.tensor.switch(0, x, y)
        f = theano.function([x, y], z, mode=self.mode)
        assert len([node.op for node in f.maker.fgraph.toposort() if
                    isinstance(node.op, theano.tensor.Elemwise)]) == 0
        vx = numpy.array([4, 5, 6], dtype='int32')
        vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')
        assert numpy.all(f(vx, vy) == vy)
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_local_add_specialize():
    # test of non-zero dimension
    a = tensor.vector()
    s = tensor.add(tensor.zeros_like(a))
    assert local_add_specialize.transform(s.owner)

    # test of 0-d
    a = tensor.scalar()
    s = tensor.add(tensor.zeros_like(a))
    assert local_add_specialize.transform(s.owner)

    # Test when the 0 input is forcing upcasting
    a = tensor.constant(0, dtype='int64')
    b = tensor.constant(1, dtype='int32')
    s = a + b
    transformed = local_add_specialize.transform(s.owner)
    assert transformed
    assert transformed[0].type == s.type
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_local_scalar_tensor_scalar():
    dtypes = ['int8', 'int16', 'int32', 'int64',
              'uint8', 'uint16', 'uint32', 'uint64',
              'float32', 'float64',
              'complex64', 'complex128']

    for dtype in dtypes:
        s_type = theano.scalar.Scalar(dtype=dtype)
        s = s_type()
        t = tensor.tensor_from_scalar(s)
        s2 = tensor.scalar_from_tensor(t)

        f = function([s], s2, mode=mode_opt)
        e = f.maker.fgraph.toposort()
        cast_nodes = [n for n in e
                if isinstance(n.op, (tensor.TensorFromScalar,
                                     tensor.ScalarFromTensor))]
        assert len(cast_nodes) == 0
        f(0)
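
For reference, the conversion pair exercised above is public API: tensor.tensor_from_scalar lifts a variable of the low-level theano.scalar type into a 0-d tensor, and tensor.scalar_from_tensor converts back; the optimization removes the redundant round trip. A minimal sketch of the unoptimized round trip (assuming a default Theano install):

import theano
import theano.tensor as tensor

s = theano.scalar.Scalar(dtype='float64')()  # variable of the low-level scalar type
t = tensor.tensor_from_scalar(s)             # lift to a 0-d TensorType
s2 = tensor.scalar_from_tensor(t)            # and back again
f = theano.function([s], s2)
assert f(3.5) == 3.5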
Source: test_opt.py (project: Theano-Deep-learning, author: GeekLiB)
def test_local_zero_div():
    """Tests 0/x -> 0"""
    mode = theano.compile.mode.get_default_mode().including("local_zero_div")
    for t in (T.scalar, T.ivector, T.ftensor4):
        x = t('x')
        for op in (T.int_div, T.true_div):
            y = op(0, x)
            g = optimize(FunctionGraph([x], [y]))
            # the division should be gone
            divs = [node for node in g.toposort()
                    if isinstance(node.op, T.elemwise.Elemwise) and
                    isinstance(node.op.scalar_op, type(op.scalar_op))]
            assert len(divs) == 0
            # the output type should match the unoptimized one
            output = g.outputs[0]
            assert output.ndim == y.ndim
            assert output.type == y.type
            # and the output should be zero
            assert theano.tensor.get_scalar_constant_value(output) == 0
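
The rewrite under test replaces 0/x (for both int_div and true_div) with a zero constant of the output's type, so the division node disappears from the graph. A short sketch of the observable behavior (whether the node is actually removed depends on which optimizations are active in your compilation mode):

import theano
import theano.tensor as T

x = T.scalar('x')
f = theano.function([x], 0.0 / x)
print(f(5.0))  # 0.0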
Source: test_extra_ops.py (project: Theano-Deep-learning, author: GeekLiB)
def test_perform(self):
        x = tensor.matrix()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        for shp in [(8, 8), (5, 8), (8, 5)]:
            a = numpy.random.rand(*shp).astype(config.floatX)
            val = numpy.cast[config.floatX](numpy.random.rand())
            out = f(a, val)
            # We can't use numpy.fill_diagonal as it is bugged.
            assert numpy.allclose(numpy.diag(out), val)
            assert (out == val).sum() == min(a.shape)

        # test for 3d tensor
        a = numpy.random.rand(3, 3, 3).astype(config.floatX)
        x = tensor.tensor3()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        val = numpy.cast[config.floatX](numpy.random.rand() + 10)
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert out[0, 0, 0] == val
        assert out[1, 1, 1] == val
        assert out[2, 2, 2] == val
        assert (out == val).sum() == min(a.shape)
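
The helper under test, fill_diagonal from theano.tensor.extra_ops, returns a copy of its input with the (generalized) diagonal set to a scalar value. A minimal usage sketch (assuming a default Theano install):

import numpy
import theano
import theano.tensor as tensor
from theano.tensor.extra_ops import fill_diagonal

x = tensor.matrix()
y = tensor.scalar()
f = theano.function([x, y], fill_diagonal(x, y))
a = numpy.zeros((3, 3), dtype=theano.config.floatX)
print(f(a, 7.0))  # 7.0 on the diagonal, 0.0 elsewhere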
Source: fxnn.py (project: fxnn, author: khaotik)
def build_model(model_):
    global fn_predict, fn_record
    global g_ozer, g_mdl

    g_ozer = dict(simple=VanillaSGD, adam=AdamSGD)[OZER]()
    g_ozer.lr = LEARN_RATE

    s_x = T.tensor4('x')
    s_y = T.ivector('y')
    s_pdpo = T.scalar()
    s_out = model_(s_x, s_pdpo)

    s_y_onehot = T.extra_ops.to_one_hot(s_y, len(g_dataset.label_map))
    s_loss = T.mean(-s_y_onehot*T.log(s_out + 1e-3))
    s_accr = T.mean(T.switch(
        T.eq(T.argmax(s_out, axis=1), T.argmax(s_y_onehot, axis=1)), 1, 0))

    no_dropout = [(s_pdpo, T.constant(0., dtype=th.config.floatX))]
    fn_predict = th.function(
        [s_x, s_y],
        {'pred':s_out, 'accr':s_accr, 'loss':s_loss},
        givens=no_dropout, profile=PROFILE)
    rec_fetches = {
        'x': s_x, 'y': s_y,
        'pred': s_out}
    rec_fetches.update(g_mdl.params_di)
    fn_record = th.function(
        [s_x, s_y], rec_fetches, givens=no_dropout, profile=PROFILE)
    g_ozer.compile(
        [s_x, s_y],
        s_loss,
        g_mdl.params_di.values(),
        fetches_={'pred': s_out, 'loss': s_loss, 'accr': s_accr},
        givens_=[(s_pdpo, T.constant(TRAIN_PDPO, dtype=th.config.floatX))],
        profile_=PROFILE)
Source: air.py (project: cortex, author: rdevon)
def step_infer(self, r, q, y, *params):
        '''Step inference function for IRVI.inference scan.

        Args:
            r: theano randomstream variable
            q: T.tensor. Current approximate posterior parameters
            y: T.tensor. Data sample
            params: list of shared variables
        Returns:
            q: T.tensor. New approximate posterior parameters
            cost: T.scalar float. Negative lower bound of current parameters
        '''

        model = self.model
        prior_params = model.get_prior_params(*params)

        h        = (r <= q[None, :, :]).astype(floatX)
        py       = model.p_y_given_h(h, *params)
        log_py_h = -model.conditional.neg_log_prob(y[None, :, :], py)
        log_ph   = -model.prior.step_neg_log_prob(h, *prior_params)
        log_qh   = -model.posterior.neg_log_prob(h, q[None, :, :])
        log_p     = log_py_h + log_ph - log_qh
        w_tilde = get_w_tilde(log_p)
        cost    = -log_p.mean()
        q_ = (w_tilde[:, :, None] * h).sum(axis=0)
        q  = self.inference_rate * q_ + (1 - self.inference_rate) * q
        return q, cost
Source: training.py (project: cortex, author: rdevon)
def set_optimizer(inputs, cost, tparams, constants, updates, extra_outs,
                  optimizer='sgd', optimizer_args=None,
                  **learning_args):
    '''Sets the parameter update functions with optimizer.

    Args:
        inputs (T.tensor): input variables.
        cost (T.scalar): cost
        tparams (OrderedDict): dictionary of tensor parameters.
        constants (list): list of constant tensors.
        updates (theano.OrderedUpdates): updates.
        extra_outs (list): list of extra output tensors.
        optimizer (Optional[str]): optimizer string. See `utils.op` for details.
            Defaults to `sgd`.
        optimizer_args (Optional[dict]): optional arguments for optimizer.
        **learning_args: extra keyword arguments for learning (not used).

    Returns:
        theano.function: gradient function.
        theano.function: update function.
        dict: extra learning keyword arguments.

    '''

    if optimizer_args is None:
        optimizer_args = dict()
    grads = T.grad(cost, wrt=itemlist(tparams),
                   consider_constant=constants)

    updates = theano.OrderedUpdates(updates)

    lr = T.scalar(name='lr')
    f_grad_shared, f_grad_updates = eval('op.' + optimizer)(
        lr, tparams, grads, inputs, cost, extra_ups=updates,
        extra_outs=extra_outs, **optimizer_args)

    return f_grad_shared, f_grad_updates, learning_args

