Python ones_like() examples (source code)

gradient_descent.py (project: dl4nlp, author: yohokuno)
def get_adagrad(learning_rate=0.5):
    """
    Adaptive Subgradient Methods for Online Learning and Stochastic Optimization
    John Duchi, Elad Hazan and Yoram Singer, Journal of Machine Learning Research 12 (2011) 2121-2159
    http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
    """
    sum_square_gradient = None

    def adagrad(gradient):
        nonlocal sum_square_gradient

        # Initializing the accumulator with ones (rather than the textbook
        # zeros) keeps the first division well defined.
        if sum_square_gradient is None:
            sum_square_gradient = np.ones_like(gradient)
        sum_square_gradient += gradient ** 2
        # Per-parameter learning rates; multiply by the gradient to get the update.
        return learning_rate / np.sqrt(sum_square_gradient)

    return adagrad
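A minimal usage sketch (the quadratic objective and loop below are illustrative, not part of dl4nlp): the closure returns per-parameter step sizes, which are multiplied by the raw gradient to form the update.

import numpy as np

# Hypothetical example: minimize f(w) = ||w||^2 with the closure above.
adagrad = get_adagrad(learning_rate=0.5)
w = np.array([3.0, -2.0])
for _ in range(200):
    gradient = 2 * w                   # gradient of ||w||^2
    w -= adagrad(gradient) * gradient  # per-parameter adaptive step
print(w)  # approaches [0, 0]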
datasets.py (project: importance-sampling, author: idiap)
def load_data(self):
        # Create the data using magic numbers to approximate the figure in
        # canevet_icml2016
        x = np.linspace(0, 1, self.N).astype(np.float32)
        ones = np.ones_like(x).astype(int)
        boundary = np.sin(4*(x + 0.5)**5)/3 + 0.5

        data = np.empty(shape=[self.N, self.N, 3], dtype=np.float32)
        data[:, :, 0] = 1-x
        for i in range(self.N):
            data[i, :, 1] = 1-x[i]
            data[i, :, 2] = 1 / (1 + np.exp(self.smooth*(x - boundary[i])))
            data[i, :, 2] = np.random.binomial(ones, data[i, :, 2])
        data = data.reshape(-1, 3)
        np.random.shuffle(data)

        # Create train and test arrays
        split = int(len(data)*self.test_split)
        X_train = data[:-split, :2]
        y_train = data[:-split, 2]
        X_test = data[-split:, :2]
        y_test = data[-split:, 2]

        return (X_train, y_train), (X_test, y_test)
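The method only reads three attributes from `self`; a hedged stand-in (the class name and default values below are assumptions, not importance-sampling's real API) shows how it can be driven:

import numpy as np

class CanevetDataset:
    """Hypothetical wrapper: N, smooth and test_split are the only
    attributes the load_data() above actually reads."""
    def __init__(self, N=256, smooth=40.0, test_split=0.33):
        self.N = N
        self.smooth = smooth
        self.test_split = test_split

CanevetDataset.load_data = load_data  # attach the function above as a method
(X_train, y_train), (X_test, y_test) = CanevetDataset().load_data()
print(X_train.shape, X_test.shape)  # two feature columns, binary labels in y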
samplers.py (project: importance-sampling, author: idiap)
def __init__(self, dataset, reweighting, model, large_batch=1024,
                 forward_batch_size=128, steps_per_epoch=300, recompute=2,
                 s_e=(1, 1), n_epochs=1):
        super(OnlineBatchSelectionSampler, self).__init__(
            dataset,
            reweighting,
            model,
            large_batch=large_batch,
            forward_batch_size=forward_batch_size
        )

        # The configuration of OnlineBatchSelection
        self.steps_per_epoch = steps_per_epoch
        self.recompute = recompute
        self.s_e = s_e
        self.n_epochs = n_epochs

        # Mutable variables to be updated
        self._batch = 0
        self._epoch = 0
        self._raw_scores = np.ones((len(dataset.train_data),))
        self._scores = np.ones_like(self._raw_scores)
        self._ranks = np.arange(len(dataset.train_data))
test_keras.py (project: wtte-rnn, author: ragulpr)
def get_data(discrete_time):
    y_test, y_train, u_train = generate_weibull(A=real_a,
                                                B=real_b,
                                                # <np.inf -> impose censoring
                                                C=censoring_point,
                                                shape=[n_sequences,
                                                       n_timesteps, 1],
                                                discrete_time=discrete_time)
    # With random input it _should_ learn weight 0
    x_train = x_test = np.random.uniform(
        low=-1, high=1, size=[n_sequences, n_timesteps, n_features])

    # y_test is uncensored data
    y_test = np.append(y_test, np.ones_like(y_test), axis=-1)
    y_train = np.append(y_train, u_train, axis=-1)
    return y_train, x_train, y_test, x_test
test_cuda.py (project: npstreams, author: LaurentRDC)
def test_ignore_nans(self):
        """ Test that NaNs are ignored. """
        source = [np.ones((16,), dtype = float) for _ in range(10)]
        source.append(np.full_like(source[0], np.nan))
        product = cprod(source, ignore_nan = True)
        self.assertTrue(np.allclose(product, np.ones_like(product)))
test_cuda.py (project: npstreams, author: LaurentRDC)
def test_dtype(self):
        """ Test that dtype argument is working """
        source = [np.ones((16,), dtype = float) for _ in range(10)]
        product = cprod(source, dtype = int)
        self.assertTrue(np.allclose(product, np.ones_like(product)))
        self.assertEqual(product.dtype, int)
test_numerics.py (project: npstreams, author: LaurentRDC)
def test_trivial(self):
        """ Test a product of ones """
        source = [np.ones((16,), dtype = float) for _ in range(10)]
        product = last(iprod(source))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
test_numerics.py (project: npstreams, author: LaurentRDC)
def test_ignore_nans(self):
        """ Test that NaNs are ignored. """
        source = [np.ones((16,), dtype = float) for _ in range(10)]
        source.append(np.full_like(source[0], np.nan))
        product = last(iprod(source, ignore_nan = True))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
test_numerics.py (project: npstreams, author: LaurentRDC)
def test_dtype(self):
        """ Test that dtype argument is working """
        source = [np.ones((16,), dtype = float) for _ in range(10)]
        product = last(iprod(source, dtype = int))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
        self.assertEqual(product.dtype, int)
test_numerics.py (project: npstreams, author: LaurentRDC)
def test_trivial(self):
        """ Test a product of ones """
        source = [np.ones((16,), dtype = float) for _ in range(10)]
        product = last(inanprod(source))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
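These tests all follow the same streaming pattern: `iprod` yields running products and `last` exhausts the iterator. A minimal sketch, assuming npstreams is installed:

import numpy as np
from npstreams import iprod, last

# Running product over a lazy stream of arrays.
arrays = (np.full((4,), 2.0) for _ in range(3))
final = last(iprod(arrays))  # consume the stream, keep the final product
print(final)                 # [8. 8. 8. 8.]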
sia_to_triangular_mesh.py (project: tissue_analysis, author: VirtualPlants)
def composed_triangular_mesh(triangular_mesh_dict):
    start_time = time()
    print("--> Composing triangular mesh...")

    mesh = TriangularMesh()

    # Merge the per-cell point dictionaries into a single mesh.
    mesh_points = np.concatenate([list(triangular_mesh_dict[c].points.keys()) for c in triangular_mesh_dict])
    mesh_point_positions = np.concatenate([list(triangular_mesh_dict[c].points.values()) for c in triangular_mesh_dict])
    mesh.points = dict(zip(mesh_points, mesh_point_positions))

    mesh_triangles = np.concatenate([list(triangular_mesh_dict[c].triangles.values()) for c in triangular_mesh_dict])
    mesh.triangles = dict(zip(np.arange(len(mesh_triangles)), mesh_triangles))

    # Label each triangle with the id of the cell it came from.
    mesh_cells = np.concatenate([c * np.ones_like(list(triangular_mesh_dict[c].triangles.keys())) for c in triangular_mesh_dict])
    triangle_cell_matching = dict(zip(np.arange(len(mesh_triangles)), mesh_cells))

    end_time = time()
    print("<-- Composing triangular mesh     [", end_time - start_time, "]")
    return mesh, triangle_cell_matching
xgboost.py (project: search-MjoLniR, author: wikimedia)
def _loess_predict(X, y_tr, X_pred, bandwidth):
    # Prepend an intercept column of ones to both design matrices.
    X_tr = np.column_stack((np.ones_like(X), X))
    X_te = np.column_stack((np.ones_like(X_pred), X_pred))
    y_te = []
    for x in X_te:
        # Gaussian kernel weights centred on the query point x.
        ws = np.exp(-np.sum((X_tr - x)**2, axis=1) / (2 * bandwidth**2))
        W = scipy.sparse.dia_matrix((ws, 0), shape=(X_tr.shape[0],) * 2)
        # Solve the weighted least-squares problem and evaluate at x.
        theta = np.linalg.pinv(X_tr.T.dot(W.dot(X_tr))).dot(X_tr.T.dot(W.dot(y_tr)))
        y_te.append(np.dot(x, theta))
    return np.array(y_te)
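A hedged usage sketch (the synthetic sine data below is illustrative, not from search-MjoLniR): smoothing noisy 1-D observations with the helper above.

import numpy as np
import scipy.sparse

rng = np.random.RandomState(0)
X = np.linspace(0, 10, 50)
y_tr = np.sin(X) + rng.normal(scale=0.2, size=X.shape)
X_query = np.linspace(0, 10, 200)
y_smooth = _loess_predict(X, y_tr, X_query, bandwidth=0.5)
print(y_smooth.shape)  # (200,)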
keras_utils.py (project: AutoSleepScorerDev, author: skjerns)
def __init__(self, X, Y, batch_size, cropsize=0):

        assert len(X) == len(Y), 'X and Y must be the same length {}!={}'.format(len(X),len(Y))
        print('starting balanced generator')
        self.X = X
        self.Y = Y
        self.cropsize = cropsize
        self.batch_size = int(batch_size)
        self.pmatrix = np.ones_like(self.Y)
        self.reset()
gamma_fullsum_nonapprox.py (project: seqhawkes, author: mlukasik)
def gamma_fullsum_grad(
    gamma,
    node_vec,
    eventmemes,
    etimes,
    T,
    mu,
    alpha,
    omega,
    W,
    beta,
    kernel_evaluate,
    K_evaluate,
    ):
    '''
    Returns the negated gradient (for use with minimizers).
    '''

    gradres = np.ones_like(gamma) * -T * np.sum(mu)
    for (eventidx, (etime1, infected_u, eventmeme)) in \
        enumerate(zip(etimes, node_vec, eventmemes)):
        gradres[eventmeme] += mu[infected_u] \
            / np.exp(event_nonapproximated_logintensity(
            infected_u,
            eventmeme,
            etime1,
            T,
            etimes[:eventidx],
            node_vec[:eventidx],
            eventmemes[:eventidx],
            mu,
            gamma,
            omega,
            alpha,
            kernel_evaluate,
            ))
    return -gradres


# =====
math.py (project: Optimizer-cotw, author: alkaya)
def relu(x, deriv=False):
    '''
    Rectifier (ReLU) activation function.
    :param x: np.array
    :param deriv: return the derivative instead?
    :return: elementwise max(x, 0), or its derivative if deriv is True
    '''
    if deriv:
        return np.ones_like(x) * (x > 0)

    return x * (x > 0)
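A quick check of both branches (uses the relu defined above):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])
print(relu(x))              # negatives zeroed: [0, 0, 0, 1.5, 3]
print(relu(x, deriv=True))  # gradient mask:   [0, 0, 0, 1, 1]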

