Python std() usage examples (source code)

image_utils.py (project: acdc_segmenter, author: baumgach)
def normalise_images(X):
    '''
    Helper for making the images zero mean and unit standard deviation i.e. `white`
    '''

    X_white = np.zeros(X.shape, dtype=np.float32)

    for ii in range(X.shape[0]):

        Xc = X[ii,:,:,:]
        mc = Xc.mean()
        sc = Xc.std()

        Xc_white = np.divide((Xc - mc), sc)

        X_white[ii,:,:,:] = Xc_white

    return X_white.astype(np.float32)
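A minimal usage sketch (synthetic batch, not data from the acdc_segmenter project): each image in the returned batch should come out with mean roughly 0 and standard deviation roughly 1.

import numpy as np

# Hypothetical batch of 4 single-channel 32x32 images
X = (np.random.rand(4, 32, 32, 1) * 255.0).astype(np.float32)
X_white = normalise_images(X)

print(X_white[0].mean(), X_white[0].std())  # expect roughly 0.0 and 1.0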
main.py (project: FaceSwap, author: Aravind-Suresh)
def get_tm_opp(pts1, pts2):
    # Transformation matrix - ( Translation + Scaling + Rotation )
    # using Procrustes analysis
    pts1 = np.float64(pts1)
    pts2 = np.float64(pts2)

    m1 = np.mean(pts1, axis = 0)
    m2 = np.mean(pts2, axis = 0)

    # Removing translation
    pts1 -= m1
    pts2 -= m2

    std1 = np.std(pts1)
    std2 = np.std(pts2)
    std_r = std2/std1

    # Removing scaling
    pts1 /= std1
    pts2 /= std2

    U, S, V = np.linalg.svd(np.transpose(pts1) * pts2)

    # Finding the rotation matrix
    R = np.transpose(U * V)

    return np.vstack([np.hstack((std_r * R,
        np.transpose(m2) - std_r * R * np.transpose(m1))), np.matrix([0.0, 0.0, 1.0])])
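The Procrustes math here needs `*` to mean matrix multiplication, so the function appears to assume np.matrix inputs. The standalone sketch below reproduces the same estimate with plain ndarrays and `@`, on hypothetical point sets (not from the FaceSwap project):

import numpy as np

# Hypothetical source points and a rotated/scaled/translated copy of them
pts1 = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
theta = np.deg2rad(30.0)
R_true = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
pts2 = 2.0 * pts1 @ R_true.T + np.array([10.0, 5.0])

# Same steps as get_tm_opp: remove translation, remove scaling, SVD for rotation
m1, m2 = pts1.mean(axis=0), pts2.mean(axis=0)
c1, c2 = pts1 - m1, pts2 - m2
s1, s2 = c1.std(), c2.std()
U, S, Vt = np.linalg.svd((c1 / s1).T @ (c2 / s2))
R = (U @ Vt).T
scale = s2 / s1
t = m2 - scale * R @ m1

print(np.allclose(scale * pts1 @ R.T + t, pts2))  # expect True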
pylspm.py (project: pylspm, author: lseman)
def normaliza(self, X):
        correction = np.sqrt((len(X) - 1) / len(X))  # std factor correction
        mean_ = np.mean(X, 0)
        scale_ = np.std(X, 0)
        X = X - mean_
        X = X / (scale_ * correction)
        return X
pylspm.py (project: pylspm, author: lseman)
def dataInfo(self):
        sd_ = np.std(self.data, 0)
        mean_ = np.mean(self.data, 0)
        skew = scipy.stats.skew(self.data)
        kurtosis = scipy.stats.kurtosis(self.data)
        w = [scipy.stats.shapiro(self.data.iloc[:, i])[0]
             for i in range(len(self.data.columns))]

        return [mean_, sd_, skew, kurtosis, w]
plsr2.py (project: pylspm, author: lseman)
def normaliza(X):
    mean_ = np.mean(X, 0)
    scale_ = np.std(X, 0)
    X = X - mean_
    X = X / (scale_)
    return X
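A quick check of the z-scoring above on hypothetical data: every column of the result has mean of about 0 and (population) standard deviation of about 1.

import numpy as np

X = np.array([[1.0, 10.0], [2.0, 30.0], [3.0, 20.0], [4.0, 40.0]])
Xn = normaliza(X)
print(Xn.mean(axis=0))  # expect ~[0, 0]
print(Xn.std(axis=0))   # expect ~[1, 1]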

# FOC = predictors (X)
# HOC = response (Y)
# T as scores
bench_stats.py (project: composability_bench, author: IntelPython)
def bench_on(runner, sym, Ns, trials, dtype=None):
    global args, kernel, out, mkl_layer
    prepare = globals().get("prepare_"+sym, prepare_default)
    kernel  = globals().get("kernel_"+sym, None)
    if not kernel:
       kernel = getattr(np.linalg, sym)
    out_lvl = runner.__doc__.split('.')[0].strip()
    func_s  = kernel.__doc__.split('.')[0].strip()
    log.debug('Preparing input data for %s (%s).. ' % (sym, func_s))
    args = [prepare(int(i)) for i in Ns]
    it = range(len(Ns))
    # pprint(Ns)
    out = np.empty(shape=(len(Ns), trials))
    b = body(trials)
    tic, toc = (0, 0)
    log.debug('Warming up %s (%s).. ' % (sym, func_s))
    runner(range(1000), empty_work)
    kernel(*args[0])
    runner(range(1000), empty_work)
    log.debug('Benchmarking %s on %s: ' % (func_s, out_lvl))
    gc_old = gc.isenabled()
#    gc.disable()
    tic = time.time()
    runner(it, b)
    toc = time.time() - tic
    if gc_old:
        gc.enable()
    if 'reused_pool' in globals():
        del globals()['reused_pool']

    # Calculate min, max, mean and standard deviation of the timings; the max tracks outliers
    min_time = np.amin(out)
    max_time = np.amax(out)
    mean_time = np.mean(out)
    stdev_time = np.std(out)

    #print("Min = %.5f, Max = %.5f, Mean = %.5f, stdev = %.5f " % (min_time, max_time, mean_time, stdev_time))
    #final_times = [min_time, max_time, mean_time, stdev_time]

    print('## %s: Outer:%s, Inner:%s, Wall seconds:%f\n' % (sym, out_lvl, mkl_layer, float(toc)))
    return out
facenet.py (project: facerecognition, author: guoxiaolu)
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1/std_adj)
    return y
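A small sketch with a hypothetical face crop: the output is zero-mean and roughly unit-std, and the 1/sqrt(x.size) floor on std_adj keeps a constant image from dividing by zero.

import numpy as np

img = np.random.randint(0, 256, size=(160, 160, 3)).astype(np.float32)
y = prewhiten(img)
print(y.mean(), y.std())  # expect roughly 0.0 and 1.0

flat = prewhiten(np.zeros((160, 160, 3)))
print(np.isfinite(flat).all())  # expect True: no division by zero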
facenet.py (project: facerecognition, author: guoxiaolu)
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)

    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)

    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):

        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0

        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])

    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
AE_training.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def std_normalize(batch):
    norm_batch = np.zeros(batch.shape)
    for i in range(len(batch)):
        norm_batch[i] = (batch[i] - np.mean(batch[i])) / np.std(batch[i])
    return norm_batch
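A minimal check on a hypothetical batch: each sample is z-scored independently of the others.

import numpy as np

batch = np.random.rand(8, 26, 100)  # hypothetical batch of feature windows
norm_batch = std_normalize(batch)
print(norm_batch[0].mean(), norm_batch[0].std())  # expect roughly 0.0 and 1.0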

# Argument parser. This script expects 2 necessary positional args.
image_as_mod3d_2dmask.py (project: kaggle_dsb2017, author: astoc)
def standardize(self, x):
        if self.preprocessing_function:
            x = self.preprocessing_function(x)
        if self.rescale:
            x *= self.rescale
        # x is a single image, so it doesn't have image number at index 0
        img_channel_axis = self.channel_axis - 1
        if self.samplewise_center:
            x -= np.mean(x, axis=img_channel_axis, keepdims=True)
        if self.samplewise_std_normalization:
            x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)

        if self.featurewise_center:
            if self.mean is not None:
                x -= self.mean
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_center`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        if self.featurewise_std_normalization:
            if self.std is not None:
                x /= (self.std + 1e-7)
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_std_normalization`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        if self.zca_whitening:
            if self.principal_components is not None:
                flatx = np.reshape(x, (x.size))
                whitex = np.dot(flatx, self.principal_components)
                x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        return x
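A plain-NumPy sketch of the samplewise branch above, on a hypothetical channels-last RGB image (channel axis 2); the featurewise and ZCA branches additionally need statistics fitted on training data.

import numpy as np

x = np.random.rand(64, 64, 3).astype(np.float32)  # hypothetical single image
channel_axis = 2

# samplewise_center / samplewise_std_normalization: subtract the mean over the
# channel axis at each position, then divide by the matching std (plus epsilon)
x -= np.mean(x, axis=channel_axis, keepdims=True)
x /= (np.std(x, axis=channel_axis, keepdims=True) + 1e-7)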
get_data.py (project: Doubly-Stochastic-DGP, author: ICL-SML)
def get_regression_data(name, split, data_path=data_path):
    path = '{}{}.csv'.format(data_path, name)

    if not os.path.isfile(path):
        download(name +'.csv', data_path=data_path)

    data = pandas.read_csv(path, header=None).values

    if name in ['energy', 'naval']:
        # there are two Ys for these, but take only the first
        X_full = data[:, :-2]
        Y_full = data[:, -2]
    else:
        X_full = data[:, :-1]
        Y_full = data[:, -1]


    X, Y, Xs, Ys = make_split(X_full, Y_full, split)

    ############# whiten inputs 
    X_mean, X_std = np.average(X, 0), np.std(X, 0)+1e-6

    X = (X - X_mean)/X_std
    Xs = (Xs - X_mean)/X_std

    return  X, Y[:, None], Xs, Ys[:, None]
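A standalone sketch of the whitening step above, with hypothetical train/test inputs: the mean and standard deviation are computed from the training inputs only and reused for the test inputs.

import numpy as np

X = np.random.rand(100, 5)   # hypothetical training inputs
Xs = np.random.rand(20, 5)   # hypothetical test inputs

X_mean, X_std = np.average(X, 0), np.std(X, 0) + 1e-6
X = (X - X_mean) / X_std
Xs = (Xs - X_mean) / X_std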
test_stats.py (project: npstreams, author: LaurentRDC)
def test_against_numpy_std(self):
        stream = [np.random.random((16, 7, 3)) for _ in range(10)]
        stack = np.stack(stream, axis = -1)

        with catch_warnings():
            simplefilter('ignore')
            for axis in (0, 1, 2, None):
                for ddof in range(4):
                    with self.subTest('axis = {}, ddof = {}'.format(axis, ddof)):
                        from_numpy = np.std(stack, axis = axis, ddof = ddof)
                        from_ivar = last(istd(stream, axis = axis, ddof = ddof))
                        self.assertSequenceEqual(from_numpy.shape, from_ivar.shape)
                        self.assertTrue(np.allclose(from_ivar, from_numpy))
cumulative.py (project: zipline-chinese, author: zhanghan1990)
def calculate_volatility(self, daily_returns):
        if len(daily_returns) <= 1:
            return 0.0
        return np.std(daily_returns, ddof=1) * math.sqrt(252)
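A worked example with made-up daily returns: the sample standard deviation (ddof=1) is annualised by the square root of 252 trading days.

import math
import numpy as np

daily_returns = np.array([0.01, -0.02, 0.015, 0.0, 0.005])  # hypothetical returns
annual_vol = np.std(daily_returns, ddof=1) * math.sqrt(252)
print(annual_vol)  # ~0.21 for these made-up returns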
period.py (project: zipline-chinese, author: zhanghan1990)
def calculate_volatility(self, daily_returns):
        return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
risk.py (project: zipline-chinese, author: zhanghan1990)
def downside_risk(algorithm_returns, mean_returns, normalization_factor):
    rets = algorithm_returns.round(8)
    mar = mean_returns.round(8)
    mask = rets < mar
    downside_diff = rets[mask] - mar[mask]
    if len(downside_diff) <= 1:
        return 0.0
    return np.std(downside_diff, ddof=1) * math.sqrt(normalization_factor)
test_transforms.py (project: zipline-chinese, author: zhanghan1990)
def test_stddev(context, data):
        """
        Tests the stddev transform by manually keeping track of the prices
        in a naive way and asserting that our stddev is the same.
        This accounts for the corrected ddof.
        """
        mins = sum(context.mins_for_days[-context.days:])

        for sid in data:
            assert_allclose(
                data[sid].stddev(context.days),
                np.std(context.price_bars[sid][-mins:], ddof=1),
            )
stats.py (project: cellranger, author: 10XGenomics)
def summarize_bootstrapped_top_n(top_n_boot):
    top_n_bcs_mean = np.mean(top_n_boot)
    top_n_bcs_sd = np.std(top_n_boot)
    top_n_bcs_var = np.var(top_n_boot)
    result = {}
    result['filtered_bcs_var'] = top_n_bcs_var
    result['filtered_bcs_cv'] = tk_stats.robust_divide(top_n_bcs_sd, top_n_bcs_mean)
    result['filtered_bcs_lb'] = round(scipy.stats.norm.ppf(0.025, top_n_bcs_mean, top_n_bcs_sd))
    result['filtered_bcs_ub'] = round(scipy.stats.norm.ppf(0.975, top_n_bcs_mean, top_n_bcs_sd))
    result['filtered_bcs'] = round(top_n_bcs_mean)
    return result
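A sketch of the confidence-interval part with hypothetical bootstrap replicates; tk_stats.robust_divide is cellranger-specific, so the coefficient of variation is computed directly here.

import numpy as np
import scipy.stats

top_n_boot = np.random.normal(loc=5000, scale=120, size=1000)  # hypothetical replicates
mean, sd = np.mean(top_n_boot), np.std(top_n_boot)
lb = round(scipy.stats.norm.ppf(0.025, mean, sd))  # lower 95% bound
ub = round(scipy.stats.norm.ppf(0.975, mean, sd))  # upper 95% bound
cv = sd / mean
print(lb, ub, cv)  # roughly 5000 - 1.96*120, 5000 + 1.96*120, and ~0.024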
HiPMDP.py (project: hip-mdp-public, author: dtak)
def __compute_bnn_training_error(self):
        """Compute BNN training error on most recent episode."""
        exp = np.reshape(self.episode_buffer_bnn, (len(self.episode_buffer_bnn),-1))
        episode_X = np.array([np.hstack([exp[tt,0],exp[tt,1]]) for tt in range(exp.shape[0])])
        episode_Y = np.array([exp[tt,3] for tt in range(exp.shape[0])])
        if self.state_diffs:
            # subtract previous state
            episode_Y -= episode_X[:,:self.num_dims]
        l2_errors = self.network.get_td_error(np.hstack([episode_X, np.tile(self.weight_set, (episode_X.shape[0],1))]), episode_Y, 0.0, 1.0)
        self.mean_episode_errors[self.instance_iter,self.episode_iter] = np.mean(l2_errors)
        self.std_episode_errors[self.instance_iter,self.episode_iter] = np.std(l2_errors)
        if self.print_output:
            print('BNN Error: {}'.format(self.mean_episode_errors[self.instance_iter,self.episode_iter]))
sampler.py (project: bnn-analysis, author: myshkov)
def __repr__(self):
        s = f'Sampler: {self.sampler_type}\n'
        s += f'Train size: {self.train_size}\n'
        s += f'Test size: {self.test_size}\n'
        s += f'Normalise: {self.normalise_data}\n'
        s += f'X: mean={self.train_x_mean}, std={self.train_x_std}\n'
        s += f'Y: mean={self.train_y_mean}, std={self.train_y_std}\n'
        return s

