Python nanmean() examples from open-source projects
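
np.nanmean() computes the arithmetic mean of an array while ignoring NaN entries, which makes it the standard tool for averaging data with missing values, or for metrics in which some terms are undefined. The snippets below show how various open-source projects use it. First, a minimal sketch of the basic behavior:

import numpy as np

a = np.array([1.0, np.nan, 3.0])
print(np.mean(a))     # nan: a single NaN poisons the plain mean
print(np.nanmean(a))  # 2.0: NaN entries are simply excluded
print(np.nanmean(np.array([[1.0, np.nan], [3.0, 4.0]]), axis=0))  # [2. 4.]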

score.py (project: cnn_polyp_detection, author: odysszis)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
    n_cl = net.blobs[layer].channels
    if save_format:
        save_format = save_format.format(iter)
    hist, loss = compute_hist(net, save_format, dataset, layer, gt)
    # mean loss
    print('>>>', datetime.now(), 'Iteration', iter, 'loss', loss)
    # overall accuracy
    acc = np.diag(hist).sum() / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc)
    # per-class accuracy
    acc = np.diag(hist) / hist.sum(1)
    print('>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc))
    # per-class IU
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu))
    freq = hist.sum(1) / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'fwavacc',
          (freq[freq > 0] * iu[freq > 0]).sum())
    return hist
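
In these FCN-style segmentation scripts, a class that never appears in either the ground truth or the prediction produces a 0/0 division and hence a NaN entry, so np.nanmean() computes the mean accuracy and mean IU only over classes that are actually present. A toy confusion matrix (hypothetical values) makes this concrete:

import numpy as np

# 3-class confusion matrix; class 2 never occurs, so its IU is 0/0 -> nan.
hist = np.array([[5., 1., 0.],
                 [2., 4., 0.],
                 [0., 0., 0.]])
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print(iu)              # [0.625 0.571 nan] (NumPy also emits a divide warning)
print(np.mean(iu))     # nan: the absent class poisons the plain mean
print(np.nanmean(iu))  # ~0.598: the undefined class is skipped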
metrics.py (project: pytorch-semseg, author: meetshah1995)
def get_scores(self):
        """Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        """
        hist = self.confusion_matrix
        acc = np.diag(hist).sum() / hist.sum()
        acc_cls = np.diag(hist) / hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))

        return {'Overall Acc: \t': acc,
                'Mean Acc : \t': acc_cls,
                'FreqW Acc : \t': fwavacc,
                'Mean IoU : \t': mean_iu,}, cls_iu
tensorFlowNetwork.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def getOverallResults(self):
        if self.multilabel:
            accs = [0] * len(self.optimize_labels)
            aucs = [0] * len(self.optimize_labels)
            f1s = [0] * len(self.optimize_labels)
            precisions = [0] * len(self.optimize_labels)
            recalls = [0] * len(self.optimize_labels)

            for i in range(len(self.optimize_labels)):
                accs[i] = self.training_val_results['acc'][self.optimize_labels[i]][-1]
                aucs[i] = self.training_val_results['auc'][self.optimize_labels[i]][-1]
                f1s[i] = self.training_val_results['f1'][self.optimize_labels[i]][-1]
                precisions[i] = self.training_val_results['precision'][self.optimize_labels[i]][-1]
                recalls[i] = self.training_val_results['recall'][self.optimize_labels[i]][-1]
            return np.nanmean(accs), np.nanmean(aucs), np.nanmean(f1s), np.nanmean(precisions), np.nanmean(recalls)
        else:
            acc = self.training_val_results['acc'][-1]
            auc = self.training_val_results['auc'][-1]
            f1 = self.training_val_results['f1'][-1]
            precision = self.training_val_results['precision'][-1]
            recall = self.training_val_results['recall'][-1]

        return acc, auc, f1, precision, recall
tensorFlowNetworkMultiTask.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def getOverallResults(self, average_over_tasks=False):
        if average_over_tasks:
            accs = [0] * len(self.optimize_labels)
            aucs = [0] * len(self.optimize_labels)
            f1s = [0] * len(self.optimize_labels)
            precisions = [0] * len(self.optimize_labels)
            recalls = [0] * len(self.optimize_labels)

            for i in range(len(self.optimize_labels)):
                accs[i] = self.training_val_results_per_task['acc'][self.optimize_labels[i]][-1]
                aucs[i] = self.training_val_results_per_task['auc'][self.optimize_labels[i]][-1]
                f1s[i] = self.training_val_results_per_task['f1'][self.optimize_labels[i]][-1]
                precisions[i] = self.training_val_results_per_task['precision'][self.optimize_labels[i]][-1]
                recalls[i] = self.training_val_results_per_task['recall'][self.optimize_labels[i]][-1]
            return np.nanmean(accs), np.nanmean(aucs), np.nanmean(f1s), np.nanmean(precisions), np.nanmean(recalls)
        else:
            acc = self.training_val_results['acc'][-1]
            auc = self.training_val_results['auc'][-1]
            f1 = self.training_val_results['f1'][-1]
            precision = self.training_val_results['precision'][-1]
            recall = self.training_val_results['recall'][-1]

        return acc, auc, f1, precision, recall
HBLRWrapper.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def getValidationResults(self, results_dict):
        self.classifier.trainUntilConverged()
        results_dict['num_clusters'] = self.classifier.K

        if self.users_as_tasks:
            val_acc, val_auc = self.getAccuracyAucOnAllTasks(self.val_tasks)
            results_dict['val_acc'] = val_acc
            results_dict['val_auc'] = val_auc
        else:
            accs = []
            aucs = []
            for t in range(self.n_tasks):
                acc, auc = self.getAccuracyAucOnOneTask(self.val_tasks, t)
                task_name = self.val_tasks[t]['Name']
                results_dict['TaskAcc-' + helper.getFriendlyLabelName(task_name)] = acc
                results_dict['TaskAuc-' + helper.getFriendlyLabelName(task_name)] = auc
                if task_name in self.optimize_labels:
                    accs.append(acc)
                    aucs.append(auc)
            results_dict['val_acc'] = np.nanmean(accs)
            results_dict['val_auc'] = np.nanmean(aucs)
        return results_dict
snpmatch.py (project: SNPmatch, author: Gregor-Mendel-Institute)
def CaseInterpreter(overlap, NumSNPs, topHits, probScore):
  overlap_thres = 0.5
  num_lines = len(probScore)
  case = 10
  if len(topHits) == 1:
    case = 0
    note = "Unique hit"
  elif np.nanmean(probScore[topHits]) > prob_thres:
    case = 2
    note = "Ambiguous sample: Accessions in top hits can be really close"
  elif overlap > overlap_thres:
    case = 3
    note = "Ambiguous sample: Sample might contain mixture of DNA or contamination"
  elif overlap < overlap_thres:
    case = 4
    note = "Ambiguous sample: Overlap of SNPs is very low, sample may not be in database"
  if case > 4:
    case = 1
    note = "Ambiguous sample"
  return (case, note)
vis_corex.py (project: LinearCorex, author: gregversteeg)
def plot_heatmaps(data, mis, column_label, cont, topk=30, prefix=''):
    cmap = sns.cubehelix_palette(as_cmap=True, light=.9)
    m, nv = mis.shape
    for j in range(m):
        inds = np.argsort(- mis[j, :])[:topk]
        if len(inds) >= 2:
            plt.clf()
            order = np.argsort(cont[:,j])
            subdata = data[:, inds][order].T
            subdata -= np.nanmean(subdata, axis=1, keepdims=True)
            subdata /= np.nanstd(subdata, axis=1, keepdims=True)
            columns = [column_label[i] for i in inds]
            sns.heatmap(subdata, vmin=-3, vmax=3, cmap=cmap, yticklabels=columns, xticklabels=False, mask=np.isnan(subdata))
            filename = '{}/heatmaps/group_num={}.png'.format(prefix, j)
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            plt.title("Latent factor {}".format(j))
            plt.yticks(rotation=0)
            plt.savefig(filename, bbox_inches='tight')
            plt.close('all')
            #plot_rels(data[:, inds], map(lambda q: column_label[q], inds), colors=cont[:, j],
            #          outfile=prefix + '/relationships/group_num=' + str(j), latent=labels[:, j], alpha=0.1)
normalized_distance.py (project: knnimpute, author: hammerlab)
def all_pairs_normalized_distances_reference(X):
    """
    Reference implementation of normalized all-pairs distance, used
    for testing the more efficient implementation above for equivalence.
    """
    n_samples, n_cols = X.shape
    # matrix of mean squared differences between samples
    D = np.ones((n_samples, n_samples), dtype="float32") * np.inf
    for i in range(n_samples):
        diffs = X - X[i, :].reshape((1, n_cols))
        missing_diffs = np.isnan(diffs)
        missing_counts_per_row = missing_diffs.sum(axis=1)
        valid_rows = missing_counts_per_row < n_cols
        D[i, valid_rows] = np.nanmean(
            diffs[valid_rows, :] ** 2,
            axis=1)
    return D
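
A quick usage sketch (toy values, assuming the function above is in scope): each D[i, j] is the mean squared difference over the columns where both samples are observed, because np.nanmean() skips the NaN diffs.

import numpy as np

X = np.array([[1.0, 2.0, np.nan],
              [1.5, np.nan, 3.0],
              [9.0, 8.0, 7.0]], dtype="float32")
D = all_pairs_normalized_distances_reference(X)
# D[i, j] averages (X[i] - X[j])**2 over the observed differences;
# a row whose differences were all missing would stay at inf.
print(D)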
score.py (project: fcn, author: wkentaro)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
    n_cl = net.blobs[layer].channels
    if save_format:
        save_format = save_format.format(iter)
    hist, loss = compute_hist(net, save_format, dataset, layer, gt)
    # mean loss
    print('>>>', datetime.now(), 'Iteration', iter, 'loss', loss)
    # overall accuracy
    acc = np.diag(hist).sum() / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc)
    # per-class accuracy
    acc = np.diag(hist) / hist.sum(1)
    print('>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc))
    # per-class IU
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu))
    freq = hist.sum(1) / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'fwavacc',
          (freq[freq > 0] * iu[freq > 0]).sum())
    return hist
utils.py (project: fcn, author: wkentaro)
def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.

      - overall accuracy
      - mean accuracy
      - mean IU
      - fwavacc
    """
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc


# -----------------------------------------------------------------------------
# Visualization
# -----------------------------------------------------------------------------
mr.py (project: pyspc, author: carlosqsilva)
def plot(self, data, size, newdata=None):
        assert size == 1
        newvalues = None

        R = np.array([np.nan] + [abs(data[i] - data[i + 1]) for i in range(len(data) - 1)])

        if newdata:
            newdata = data[-1:] + newdata
            n = len(newdata)
            newvalues = [abs(newdata[i] - newdata[i + 1]) for i in range(n - 1)]

        Rbar = np.nanmean(R)

        lclr = D3[2] * Rbar
        uclr = D4[2] * Rbar

        return (R, Rbar, lclr, uclr, self._title)
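
The leading np.nan keeps the moving-range array R the same length as data (the first point has no predecessor), and np.nanmean() then averages only the defined ranges when computing Rbar. A small illustration:

import numpy as np

data = [5.0, 7.0, 6.0, 9.0]
R = np.array([np.nan] + [abs(data[i] - data[i + 1]) for i in range(len(data) - 1)])
print(R)              # [nan  2.  1.  3.]
print(np.nanmean(R))  # 2.0 -- the placeholder is ignored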
mr.py (project: pyspc, author: carlosqsilva)
def plot(self, data, size, newdata=None):
        assert size == 1
        newvalues = None

        R = np.array([np.nan] + [abs(data[i] - data[i + 1]) for i in range(len(data) - 1)])

        if newdata:
            newvalues = newdata

        Rbar = np.nanmean(R)
        Xbar = np.mean(data)

        lclx = Xbar - 3 * (Rbar / d2[2])
        uclx = Xbar + 3 * (Rbar / d2[2])

        return (data, Xbar, lclx, uclx, self._title)
windeval.py (project: POWER, author: pennelise)
def hourly_wind_speed(wind_speeds, times):
    """Average wind speed over hours and return a 1x24 numpy array.

    Arguments:
    wind_speeds -- a np array of all wind speeds
    times -- a np array of all times with indexes corresponding to wind_speeds
    """
    avg_hourly_ws = []
    new_times = []
    hours = np.array([t.hour for t in times]) #Make an array of just the hours.
    for i in range(24):
        avg_hourly_ws.append(np.nanmean(wind_speeds[hours == i]))
        new_times.append(i)
    return np.array(new_times), np.array(avg_hourly_ws)  # return the hours and their mean wind speeds as NumPy arrays

#Gets average wind dir for each hour of the day (returns 24h averaged over multiple days)
bcic_iv_2a.py (project: braindecode, author: robintibor)
def extract_data(self):
        raw_edf = mne.io.read_raw_edf(self.filename, stim_channel='auto')
        raw_edf.load_data()
        # correct nan values

        data = raw_edf.get_data()

        # do not correct stimulus channel
        assert raw_edf.ch_names[-1] == 'STI 014'
        for i_chan in range(data.shape[0] - 1):
            # first set to NaN, then replace NaNs with the channel's nanmean.
            this_chan = data[i_chan]
            data[i_chan] = np.where(this_chan == np.min(this_chan),
                                    np.nan, this_chan)
            mask = np.isnan(data[i_chan])
            chan_mean = np.nanmean(data[i_chan])
            data[i_chan, mask] = chan_mean
        gdf_events = raw_edf.find_edf_events()
        raw_edf = mne.io.RawArray(data, raw_edf.info, verbose='WARNING')
        # remember gdf events
        raw_edf.info['gdf_events'] = gdf_events
        return raw_edf
utils.py (project: real_time_face_detection, author: Snowapril)
def load_dataset():
    if(not os.path.exists("./dataset/training.csv")):
        print("dataset does not exist")
        raise Exception

    #load dataset
    labeled_image = pd.read_csv("./dataset/training.csv")

    #preprocessing dataframe
    image = np.array(labeled_image["Image"].values).reshape(-1, 1)
    image = np.apply_along_axis(lambda img: (img[0].split()), 1, image)
    image = image.astype(np.int32)  # pixel values are strings before preprocessing
    image = image.reshape(-1, 96 * 96)  # each image is 96x96, flattened

    label = labeled_image.values[:,:-1]
    label = label.astype(np.float32)

    # replace NaN labels with the column mean
    col_mean = np.nanmean(label, axis=0)
    indices = np.where(np.isnan(label))
    label[indices] = np.take(col_mean, indices[1])

    return image, label
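
The NaN-to-column-mean imputation pattern above, shown in isolation on a toy array:

import numpy as np

label = np.array([[1.0, np.nan],
                  [3.0, 4.0],
                  [np.nan, 8.0]], dtype=np.float32)
col_mean = np.nanmean(label, axis=0)         # per-column means, NaNs ignored -> [2. 6.]
rows, cols = np.where(np.isnan(label))       # coordinates of the missing entries
label[rows, cols] = np.take(col_mean, cols)  # fill each NaN with its column mean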
score.py (project: Seg-with-SPN, author: JingchunCheng)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
    n_cl = net.blobs[layer].channels
    if save_format:
        save_format = save_format.format(iter)
    hist, loss = compute_hist(net, save_format, dataset, layer, gt)
    # mean loss
    print('>>>', datetime.now(), 'Iteration', iter, 'loss', loss)
    # overall accuracy
    acc = np.diag(hist).sum() / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc)
    # per-class accuracy
    acc = np.diag(hist) / hist.sum(1)
    print('>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc))
    # per-class IU
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu))
    freq = hist.sum(1) / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'fwavacc',
          (freq[freq > 0] * iu[freq > 0]).sum())
    return hist
callbacks.py (project: vinci, author: Phylliade)
def on_episode_end(self, episode, logs):
        duration = timeit.default_timer() - self.starts[episode]

        metrics = self.metrics[episode]
        if np.isnan(metrics).all():
            mean_metrics = np.array([np.nan for _ in self.metrics_names])
        else:
            mean_metrics = np.nanmean(metrics, axis=0)
        assert len(mean_metrics) == len(self.metrics_names)

        data = list(zip(self.metrics_names, mean_metrics))
        data += list(logs.items())
        data += [('episode', episode), ('duration', duration)]
        for key, value in data:
            if key not in self.data:
                self.data[key] = []
            self.data[key].append(value)

        if self.interval is not None and episode % self.interval == 0:
            self.save_data()

        # Clean up.
        del self.metrics[episode]
        del self.starts[episode]
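
The isnan(...).all() guard above matters because np.nanmean() returns NaN (and emits a "Mean of empty slice" RuntimeWarning) for any slice that is entirely NaN. A small sketch:

import numpy as np

metrics = np.array([[np.nan, 1.0],
                    [np.nan, 3.0]])
# Column 0 is all NaN: nanmean yields nan there and warns.
print(np.nanmean(metrics, axis=0))  # [nan  2.]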
misc.py (project: wtte-rnn, author: ragulpr)
def timeline_aggregate_plot(padded, title='', cmap="jet", plot=True):
    fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True,
                           sharey=False, figsize=(12, 8))

    fig, ax[0] = timeline_plot(
        padded, title, cmap=cmap, plot=False, fig=fig, ax=ax[0])

    ax[1, 0].plot(np.nanmean(padded, axis=0), lw=0.5,
                  c='black', drawstyle='steps-post')
    ax[1, 0].set_title('mean/timestep')
    padded = tr.right_pad_to_left_pad(padded)
    ax[1, 1].plot(np.nanmean(padded, axis=0), lw=0.5,
                  c='black', drawstyle='steps-post')
    ax[1, 1].set_title('mean/timestep')

    fig.suptitle(title, fontsize=14)
    if plot:
        fig.show()
        return None, None
    else:
        return fig, ax
test_graynet.py (project: graynet, author: raamana)
def test_run_roi_stats_via_API():
    "Tests whether roi stats can be computed (not their accuracy) and the return values match in size."

    summary_methods = ['median', 'mean', 'std', 'variation', 'entropy', 'skew', 'kurtosis']
    # 'mode' returns more than one value; 'gmean' requires only positive values,
    # 'hmean' can not always be computed
    from scipy.stats import trim_mean, kstat
    from functools import partial
    trimmed_mean = partial(trim_mean, proportiontocut=0.05)
    third_kstat = partial(kstat, n=3)

    summary_methods.extend([trimmed_mean, third_kstat])
    # checking support for nan-handling callables
    summary_methods.extend([np.nanmedian, np.nanmean])

    for summary_method in summary_methods:
        roi_medians = graynet.roiwise_stats_indiv(subject_id_list, fs_dir, base_feature=base_feature,
                                                  chosen_roi_stats=summary_method, atlas=atlas,
                                                  smoothing_param=fwhm, out_dir=out_dir, return_results=True)
        for sub in subject_id_list:
            if roi_medians[sub].size != num_roi_wholebrain:
                raise ValueError('invalid summary stats - #nodes do not match.')
score.py (project: NYUD-FCN8s, author: yxliwhu)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
    n_cl = net.blobs[layer].channels
    if save_format:
        save_format = save_format.format(iter)
    hist, loss = compute_hist(net, save_format, dataset, layer, gt)
    # mean loss
    print('>>>', datetime.now(), 'Iteration', iter, 'loss', loss)
    # overall accuracy
    acc = np.diag(hist).sum() / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc)
    # per-class accuracy
    acc = np.diag(hist) / hist.sum(1)
    print('>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc))
    # per-class IU
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu))
    freq = hist.sum(1) / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'fwavacc',
          (freq[freq > 0] * iu[freq > 0]).sum())
    return hist
temp.py (project: ReinforcementL_trading, author: zhangbppku8663)
def add_MACD(data, Ns=[12,26,9]):
    '''
    :param data: DataFrame containing stock price info in the second column
    :param Ns: List of short term long term EMA to use and look-back window of MACD's EMA
    :return:
    '''
    symbol = data.columns.values[1]  # assuming stock price is in the second column in data
    MACD = cal_EMA(data.loc[:, symbol], N=Ns[0]) - cal_EMA(data.loc[:, symbol], N=Ns[1])  # .ix is removed in modern pandas
    data['MACD'] = MACD
    signal = cal_EMA(data.MACD[Ns[1]:], N=Ns[2])
    # # normalized them
    # MACD = (MACD - np.nanmean(MACD))/(2*np.nanstd(MACD))
    # signal  = (signal - np.nanmean(signal))/(2*np.nanstd(signal))
    data['Signal'] = np.nan  # use a real NaN, not the string 'NaN'; filled from index Ns[1] onward
    data.loc[Ns[1]:, 'Signal'] = signal

    return data
util.py (project: ReinforcementL_trading, author: zhangbppku8663)
def add_MACD(data, Ns=None):
    '''
    :param data: DataFrame containing stock price info in the second column
    :param Ns: List of short term long term EMA to use and look-back window of MACD's EMA
    :return:
    '''
    if Ns is None:
        Ns = [12, 26, 9]
    symbol = data.columns.values[1]  # assuming stock price is in the second column in data
    MACD = cal_EMA(data.loc[:, symbol], N=Ns[0]) - cal_EMA(data.loc[:, symbol], N=Ns[1])
    data['MACD'] = MACD
    signal = cal_EMA(data.MACD[Ns[1]:], N=Ns[2])
    # # normalized them
    # MACD = (MACD - np.nanmean(MACD))/(2*np.nanstd(MACD))
    # signal  = (signal - np.nanmean(signal))/(2*np.nanstd(signal))
    # data['MACD'] = MACD
    data['Signal'] = np.nan  # use a real NaN, not the string 'NaN'; filled from index Ns[1] onward
    data.loc[Ns[1]:, 'Signal'] = signal

    return data
plot_spectra.py (project: DR1_analysis, author: GBTAmmoniaSurvey)
def mean_spectra(region,line,file_extension,restFreq,spec_param):
    '''
    Average spectra over the entire mapped region.
    Cubes are missing the BUNIT header parameter. Fix.
    '''
    filein = '{0}/{0}_{1}_{2}_trim.fits'.format(region, line, file_extension)
    #add_fits_units(filein,'K')
    cube = SpectralCube.read(filein)
    #trim_edge_cube(cube)
    slice_unmasked = cube.unmasked_data[:,:,:]
    if line == 'NH3_33':
        slice_unmasked[spec_param['mask33_chans'][0]:spec_param['mask33_chans'][1],:,:]=0.
    summed_spectrum = np.nanmean(slice_unmasked,axis=(1,2))
    cube2 = cube.with_spectral_unit(u.km/u.s,velocity_convention='radio',
                                    rest_value=restFreq*u.GHz)
    return summed_spectrum, cube2.spectral_axis
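
Note the tuple axis: np.nanmean(slice_unmasked, axis=(1, 2)) collapses both spatial axes of the (spectral, y, x) cube at once, leaving one mean per spectral channel. A toy illustration:

import numpy as np

cube = np.random.rand(4, 3, 3)  # (spectral, y, x)
cube[0, 0, 0] = np.nan          # e.g. a masked pixel
spectrum = np.nanmean(cube, axis=(1, 2))
print(spectrum.shape)           # (4,) -- one mean per spectral channel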
metrics.py (project: tefla, author: openAGI)
def meaniou(self, predictor, predict_dir, image_size):
        segparams = util.SegParams()
        classes = segparams.feature_classes().values()
        num_classes = len(classes) + 1
        hist = np.zeros((num_classes, num_classes))
        image_names = [filename.strip() for filename in os.listdir(
            predict_dir) if filename.endswith('.jpg')]
        for image_filename in image_names:
            final_prediction_map = predictor.predict(
                os.path.join(predict_dir, image_filename))
            final_prediction_map = final_prediction_map.transpose(
                0, 2, 1).squeeze()
            gt_name = os.path.join(predict_dir,
                                   image_filename[:-4] + '_final_mask' + '.png')
            gt = convert(gt_name, image_size)
            gt = np.asarray(gt)
            gt = convert_labels(gt, image_size, image_size)
            hist += compute_hist(gt, final_prediction_map,
                                 num_classes=num_classes)
        iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
        meaniou = np.nanmean(iou)

        return meaniou
Experiment.py (project: FLASH, author: yuyuz)
def get_arg_best(self):
        best_idx = -1
        best_value = float('inf')  # sys.maxint is Python 2-only
        for i, trial in enumerate(self.trials):
            tmp_res = np.NaN
            if np.isfinite(trial['result']):
                tmp_res = trial['result']
            elif np.isfinite(trial['instance_results']).any():
                tmp_res = wrapping_util.nan_mean(trial['instance_results'])
                # np.nanmean is not available in older numpy versions
                # tmp_res = scipy.nanmean(trial['instance_results'])
            else:
                continue
            if tmp_res < best_value:
                best_idx = i
                best_value = tmp_res
        if best_idx == -1:
            raise ValueError("No best value found.")
        return best_idx

    # Get the best value so far, for more documentation see get_arg_best
score.py (project: testing-fcn-for-cityscapes, author: simonguist)
def do_seg_tests(net, iter, save_format, n_dataset, layer='score', gt='label'):
    print('do seg tests')
    print('........................')
    n_cl = net.blobs[layer].channels
    if save_format:
        save_format = save_format.format(iter)
    hist, loss = compute_hist(net, save_format, n_dataset, layer, gt)
    # mean loss
    print('>>>', datetime.now(), 'Iteration', iter, 'loss', loss)
    # overall accuracy
    acc = np.diag(hist).sum() / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc)
    # per-class accuracy
    acc = np.diag(hist) / hist.sum(1)
    print('>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc))
    # per-class IU
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu))
    freq = hist.sum(1) / hist.sum()
    print('>>>', datetime.now(), 'Iteration', iter, 'fwavacc',
          (freq[freq > 0] * iu[freq > 0]).sum())
    return hist
histogram.py (project: gullikson-scripts, author: kgullikson88)
def lnlike(self, pars):
        # Pull theta out of pars
        theta = pars[:self.Nbins]

        # Generate the inner summation
        gamma = np.ones_like(self.bin_idx) * np.nan
        good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0)  # nans in q get put in nonexistent bins
        gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]]
        summation = np.nanmean(gamma, axis=1)

        # Calculate the integral
        I = self._integral_fcn(theta)

        # Generate the log-likelihood
        ll = -I + np.nansum(np.log(summation))
        return ll
Sensitivity.py (project: gullikson-scripts, author: kgullikson88)
def summarize_sensitivity(sens_df):
    """
    Summarize the sensitivity analysis by finding the detection rate and average significance
    as a function of teff and vsini

    Parameters:
    ===========
    - sens_df:   pandas DataFrame
                 A DataFrame such as the one generated by read_hdf5

    Returns:
    ========
    A pandas dataframe with the summary
    """
    cols = ['star', 'date', '[Fe/H]', 'logg', 'addmode', 'temperature', 'vsini']
    detrate = sens_df.groupby(cols).apply(lambda d: (d.significance > 5).sum() / float(len(d)))
    detrate = detrate.reset_index().rename(columns={0: 'detrate'})
    significance = sens_df.groupby(cols).apply(lambda d: np.nanmean(d.significance))
    significance = significance.reset_index().rename(columns={0: 'significance'})
    detrate['significance'] = significance['significance']
    return detrate

