Python show(): example source code

exp_utils.py (project: gcforest, author: w821881341)
def plot_confusion_matrix(cm, label_list, title='Confusion matrix', cmap=None):
    from matplotlib import pylab
    cm = np.asarray(cm, dtype=np.float32)
    for i, row in enumerate(cm):
        cm[i] = cm[i] / np.sum(cm[i])
    #import matplotlib.pyplot as plt
    #plt.ion()
    pylab.clf()
    pylab.matshow(cm, fignum=False, cmap=(cmap or 'Blues'), vmin=0, vmax=1.0)  # fall back to 'Blues' if no cmap given
    ax = pylab.axes()
    ax.set_xticks(range(len(label_list)))
    ax.set_xticklabels(label_list, rotation='vertical')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_yticks(range(len(label_list)))
    ax.set_yticklabels(label_list)
    pylab.title(title)
    pylab.colorbar()
    pylab.grid(False)
    pylab.xlabel('Predicted class')
    pylab.ylabel('True class')
    pylab.grid(False)
    pylab.savefig('test.jpg')
    pylab.show()
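
A minimal usage sketch (the counts and class labels below are purely illustrative, not from the gcforest project):

# Illustrative data only: a 3-class count matrix; rows are normalized inside the function.
import numpy as np
cm = np.array([[50, 2, 3],
               [4, 45, 1],
               [2, 0, 48]])
plot_confusion_matrix(cm, ['cat', 'dog', 'bird'], title='Normalized confusion matrix')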
tdose_utilities.py (project: TDOSE, author: kasperschmidt)
def gen_aperture(imgsize,ypos,xpos,radius,pixval=1,showaperture=False,verbose=True):
    """
    Generating an aperture image

    --- INPUT ---
    imgsize       The dimensions of the array to return. Expects [y-size,x-size].
                  The aperture will be positioned in the center of a (+/-x-size/2., +/-y-size/2) sized array
    ypos          Pixel position in the y direction
    xpos          Pixel position in the x direction
    radius        Radius of aperture in pixels
    showaperture  Display image of generated aperture
    verbose       Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    apertureimg  = tu.gen_aperture([20,40],10,5,10,showaperture=True)
    apertureimg  = tu.gen_aperture([2000,4000],900,1700,150,showaperture=True)

    """
    if verbose: print(' - Generating aperture in image (2D array)')
    y , x    = np.ogrid[-ypos:imgsize[0]-ypos, -xpos:imgsize[1]-xpos]
    mask     = x*x + y*y <= radius**2.
    aperture = np.zeros(imgsize)

    if verbose: print(' - Assigning pixel value '+str(pixval)+' to aperture')
    aperture[mask] = pixval

    if showaperture:
        if verbose: print ' - Displaying resulting image of aperture'
        plt.imshow(aperture,interpolation='none')
        plt.title('Generated aperture')
        plt.show()

    return aperture
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
tdose_model_FoV.py (project: TDOSE, author: kasperschmidt)
def residual_multigauss(param, dataimage, nonfinite = 0.0, ravelresidual=True, showimages=False, verbose=False):
    """
    Calculate the residual between the multi-Gaussian model defined by the parameters 'param' and the data.

    --- INPUT ---
    param         Parameters of multi-gaussian model to generate. See modelimage_multigauss() header for details
    dataimage     Data image to take residual
    nonfinite     Value to replace non-finite entries in residual with
    ravelresidual To np.ravel() the residual image set this to True. Needed by scipy.optimize.leastsq()
                  optimizer function
    showimages    Set to True to display the model and residual images
    verbose       Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_model_FoV as tmf
    param      = [18,31,1*0.3,2.1*0.3,1.2*0.3,30*0.3,    110,90,200*0.5,20.1*0.5,15.2*0.5,0*0.5]
    dataimg    = pyfits.open('/Users/kschmidt/work/TDOSE/mock_cube_sourcecat161213_tdose_mock_cube.fits')[0].data[0,:,:]
    residual   = tmf.residual_multigauss(param, dataimg, showimages=True)

    """
    if verbose: print(' - Estimating residual (= model - data) between model and data image')
    imgsize      = dataimage.shape
    xgrid, ygrid = tu.gen_gridcomponents(imgsize)
    modelimg     = tmf.modelimage_multigauss((xgrid, ygrid),param,imgsize,showmodelimg=showimages, verbose=verbose)

    residualimg  = modelimg - dataimage

    if showimages:
        plt.imshow(residualimg,interpolation='none', vmin=1e-5, vmax=np.max(residualimg), norm=mpl.colors.LogNorm())
        plt.title('Residual (= model - data) image')
        plt.show()

    if nonfinite is not None:
        residualimg[~np.isfinite(residualimg)] = nonfinite  # use the requested replacement value, not a hard-coded 0.0

    if ravelresidual:
        residualimg = np.ravel(residualimg)

    return residualimg
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
analyzer.py (project: toll_road, author: idosekely)
def plot(ts):
        if not plt:
            print("matplotlib is not available; cannot plot")
            return
        fig, ax = plt.subplots()
        lined = dict()

        ax.set_title('Click on legend line to toggle line on/off')
        lines = [ax.plot(ts[col], label=col) for col in ts.columns]
        leg = ax.legend(loc='best')

        for legline, origline in zip(leg.get_lines(), lines):
            legline.set_picker(5)  # 5 pts tolerance
            lined[legline] = origline[0]

        def onpick(event):
            # on the pick event, find the orig line corresponding to the
            # legend proxy line, and toggle the visibility
            legline = event.artist
            origline = lined[legline]
            vis = not origline.get_visible()
            origline.set_visible(vis)
            # Change the alpha on the line in the legend so we can see what lines
            # have been toggled
            if vis:
                legline.set_alpha(1.0)
            else:
                legline.set_alpha(0.2)
            fig.canvas.draw()

        fig.canvas.mpl_connect('pick_event', onpick)
        plt.show(False)
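
A minimal usage sketch, assuming pandas is available and `plt` is the module-level matplotlib.pyplot import of analyzer.py; the column names are illustrative:

# Illustrative data only: two random time series; click a legend entry to toggle its line.
import numpy as np
import pandas as pd
ts = pd.DataFrame({'lane_a': np.random.rand(100),
                   'lane_b': np.random.rand(100)})
plot(ts)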
word2vec_cbow.py (project: DeepLearning, author: STHSF)
def plot(embeddings, labels):
    assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
    pylab.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = embeddings[i, :]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                       ha='right', va='bottom')
    pylab.show()
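
A minimal usage sketch with random 2-D points standing in for t-SNE-reduced embeddings; the word labels are illustrative:

# Illustrative data only: one 2-D point per label.
import numpy as np
labels = ['king', 'queen', 'man', 'woman', 'paris', 'france']
two_d_embeddings = np.random.randn(len(labels), 2)
plot(two_d_embeddings, labels)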
rasta_plp_extractor.py (project: speech_feature_extractor, author: ZhihaoDU)
def rasta_plp_extractor(x, sr, plp_order=0, do_rasta=True):
    spec = log_power_spectrum_extractor(x, int(sr*0.02), int(sr*0.01), 'hamming', False)
    bark_filters = int(np.ceil(freq2bark(sr//2)))
    wts = get_fft_bark_mat(sr, int(sr*0.02), bark_filters)
    '''
    plt.figure()
    plt.subplot(211)
    plt.imshow(wts)
    plt.subplot(212)
    plt.hold(True)
    for i in range(18):
        plt.plot(wts[i, :])
    plt.show()
    '''
    bark_spec = np.matmul(wts, spec)
    if do_rasta:
        bark_spec = np.where(bark_spec == 0.0, np.finfo(float).eps, bark_spec)
        log_bark_spec = np.log(bark_spec)
        rasta_log_bark_spec = rasta_filt(log_bark_spec)
        bark_spec = np.exp(rasta_log_bark_spec)
    post_spec = postaud(bark_spec, sr/2.)
    if plp_order > 0:
        lpcas = do_lpc(post_spec, plp_order)
        # lpcas = do_lpc(spec, plp_order) # just for test
    else:
        lpcas = post_spec
    return lpcas
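
A minimal usage sketch with one second of noise as a stand-in signal; the helper functions (log_power_spectrum_extractor, get_fft_bark_mat, rasta_filt, postaud, do_lpc) are assumed to come from the same module:

# Illustrative signal only: white noise, just to exercise the feature pipeline.
import numpy as np
sr = 16000
x = np.random.randn(sr)  # 1 second of noise
plp_features = rasta_plp_extractor(x, sr, plp_order=12, do_rasta=True)
print(plp_features.shape)  # one PLP vector per analysis frame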
gcd.py (project: Spherical-robot, author: Evan-Zhao)
def plot(l, samp, w1, w2, cor):
    time_range = numpy.arange(0, l) * (1.0 / samp)

    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, w1)
    pl.subplot(212)
    pl.plot(time_range, w2, c="r")
    pl.xlabel("time")

    pl.figure(2)
    pl.plot(time_range, cor)
    pl.show()
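
A minimal usage sketch: two synthetic tones and their same-length cross-correlation, so all arrays line up with the time axis built inside plot() (pl is assumed to be the module-level pylab import):

# Illustrative data only: a 440 Hz tone and a delayed copy.
import numpy
samp = 8000
l = samp // 2
t = numpy.arange(0, l) / float(samp)
w1 = numpy.sin(2 * numpy.pi * 440 * t)
w2 = numpy.sin(2 * numpy.pi * 440 * (t - 0.01))
cor = numpy.correlate(w1, w2, 'same')  # same length as the inputs
plot(l, samp, w1, w2, cor)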
cut_chan.py (project: Spherical-robot, author: Evan-Zhao)
def main():
    sampling, maxvalue, wave_data = record.record()

    # Pick out two channels for our study.
    w1, w2 = wave_data[1:3]
    nframes = w1.shape[0]

    # Cut one channel in the tail, while the other in the head,
    # to guarantee same length and first delays second.
    cut_time_len = 0.2  # second
    cut_len = int(cut_time_len * sampling)
    wp1 = w1[:-cut_len]
    wp2 = w2[cut_len:]

    # Get their reduced (amplitude) version, and
    # calculate correlation.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)

    # Plot the channels, also the correlation.
    time_range = numpy.arange(0, nframes - cut_len)*(1.0/sampling)

    # Still shows the original signal
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()

    # Print delay
    print("Chan 1 delay chan 2 by {0}".format(delay_time))
pad_chan.py (project: Spherical-robot, author: Evan-Zhao)
def main():
    sampling, maxvalue, wave_data = record.record()

    # Pick out two channels for our study.
    w1, w2 = wave_data[0:2]
    nframes = w1.shape[0]

    # Pad one channel in the head, while the other in the tail,
    # to guarantee same length.
    pad_time_len = 0.01  # second
    pad_len = int(pad_time_len * sampling)
    pad_arr = numpy.zeros(pad_len)
    wp1 = numpy.concatenate((pad_arr, w1))
    wp2 = numpy.concatenate((w2, pad_arr))

    # Get their reduced (amplitude) version, and
    # calculate correlation.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)

    # Plot the channels, also the correlation.
    time_range = numpy.arange(0, nframes + pad_len)*(1.0/sampling)

    # Still shows the original signal
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()

    # Print delay
    print("Chan 1 delay chan 2 by {0}".format(delay_time))
visual.py (project: Spherical-robot, author: Evan-Zhao)
def plot_channel(audio, sampling):
    channels, nframes = audio.shape[0], audio.shape[1]
    time_range = numpy.arange(0, nframes) * (1.0 / sampling)

    for i in range(1, channels + 1):
        pl.figure(i)
        pl.plot(time_range, audio[i - 1])
        pl.xlabel("time{0}".format(i))

    pl.show()
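
A minimal usage sketch with a two-channel synthetic signal, shaped (channels, frames) as the function expects:

# Illustrative data only: one second of a 440 Hz and an 880 Hz tone.
import numpy
sampling = 44100
t = numpy.arange(0, sampling) / float(sampling)
audio = numpy.vstack([numpy.sin(2 * numpy.pi * 440 * t),
                      numpy.sin(2 * numpy.pi * 880 * t)])
plot_channel(audio, sampling)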
generate_plots.py (project: hand_eye_calibration, author: ethz-asl)
def generate_box_plot(dataset, methods, position_rmses, orientation_rmses):

  num_methods = len(methods)
  x_ticks = np.linspace(0., 1., num_methods)

  width = 0.3 / num_methods
  spacing = 0.3 / num_methods
  fig, ax1 = plt.subplots()
  ax1.set_ylabel('RMSE position [m]', color='b')
  ax1.tick_params('y', colors='b')
  fig.suptitle(
      "Hand-Eye Calibration Method Error {}".format(dataset), fontsize='24')
  bp_position = ax1.boxplot(position_rmses, 0, '',
                            positions=x_ticks - spacing, widths=width)
  plt.setp(bp_position['boxes'], color='blue', linewidth=line_width)
  plt.setp(bp_position['whiskers'], color='blue', linewidth=line_width)
  plt.setp(bp_position['fliers'], color='blue',
           marker='+', linewidth=line_width)
  plt.setp(bp_position['caps'], color='blue', linewidth=line_width)
  plt.setp(bp_position['medians'], color='blue', linewidth=line_width)
  ax2 = ax1.twinx()
  ax2.set_ylabel(r'RMSE Orientation [$^\circ$]', color='g')
  ax2.tick_params('y', colors='g')
  bp_orientation = ax2.boxplot(
      orientation_rmses, 0, '', positions=x_ticks + spacing, widths=width)
  plt.setp(bp_orientation['boxes'], color='green', linewidth=line_width)
  plt.setp(bp_orientation['whiskers'], color='green', linewidth=line_width)
  plt.setp(bp_orientation['fliers'], color='green',
           marker='+')
  plt.setp(bp_orientation['caps'], color='green', linewidth=line_width)
  plt.setp(bp_orientation['medians'], color='green', linewidth=line_width)

  plt.xticks(x_ticks, methods)
  plt.xlim(x_ticks[0] - 2.5 * spacing, x_ticks[-1] + 2.5 * spacing)

  plt.show()
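
A minimal usage sketch with random RMSE samples; note that line_width is a module-level global in the original script, so it is set explicitly here, and the method names are illustrative:

# Illustrative data only: 20 RMSE samples per method.
import numpy as np
line_width = 1.5
methods = ['DQ', 'Batch', 'RANSAC']
position_rmses = [np.random.rand(20) * 0.05 for _ in methods]    # metres
orientation_rmses = [np.random.rand(20) * 2.0 for _ in methods]  # degrees
generate_box_plot('(simulated data)', methods, position_rmses, orientation_rmses)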
generate_plots.py (project: hand_eye_calibration, author: ethz-asl)
def generate_time_plot(methods, datasets, runtimes_per_method, colors):
  num_methods = len(methods)
  num_datasets = len(datasets)
  x_ticks = np.linspace(0., 1., num_methods)

  width = 0.6 / num_methods / num_datasets
  spacing = 0.4 / num_methods / num_datasets
  fig, ax1 = plt.subplots()
  ax1.set_ylabel('Time [s]', color='b')
  ax1.tick_params('y', colors='b')
  ax1.set_yscale('log')
  fig.suptitle("Hand-Eye Calibration Method Timings", fontsize='24')
  handles = []
  for i, dataset in enumerate(datasets):
    runtimes = [runtimes_per_method[dataset][method] for method in methods]
    bp = ax1.boxplot(
        runtimes, 0, '',
        positions=(x_ticks + (i - num_datasets / 2. + 0.5) *
                   spacing * 2),
        widths=width)
    plt.setp(bp['boxes'], color=colors[i], linewidth=line_width)
    plt.setp(bp['whiskers'], color=colors[i], linewidth=line_width)
    plt.setp(bp['fliers'], color=colors[i],
             marker='+', linewidth=line_width)
    plt.setp(bp['medians'], color=colors[i],
             marker='+', linewidth=line_width)
    plt.setp(bp['caps'], color=colors[i], linewidth=line_width)
    handles.append(mpatches.Patch(color=colors[i], label=dataset))
  plt.legend(handles=handles, loc=2)

  plt.xticks(x_ticks, methods)
  plt.xlim(x_ticks[0] - 2.5 * spacing * num_datasets,
           x_ticks[-1] + 2.5 * spacing * num_datasets)

  plt.show()
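
A minimal usage sketch, again assuming the module-level line_width and the matplotlib.patches import (as mpatches) of the original script; method and dataset names are illustrative:

# Illustrative data only: 20 runtime samples per (dataset, method) pair.
import numpy as np
line_width = 1.5
methods = ['DQ', 'Batch']
datasets = ['simulated', 'real']
runtimes_per_method = {d: {m: np.random.rand(20) for m in methods} for d in datasets}
generate_time_plot(methods, datasets, runtimes_per_method, colors=['blue', 'green'])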
time_alignment_plotting_tools.py (project: hand_eye_calibration, author: ethz-asl)
def plot_angular_velocities(title,
                            angular_velocities,
                            angular_velocities_filtered,
                            block=True):
  fig = plt.figure()

  title_position = 1.05

  fig.suptitle(title, fontsize='24')

  a1 = plt.subplot(1, 2, 1)
  a1.set_title(
      "Angular Velocities Before Filtering \nvx [red], vy [green], vz [blue]",
      y=title_position)
  plt.plot(angular_velocities[:, 0], c='r')
  plt.plot(angular_velocities[:, 1], c='g')
  plt.plot(angular_velocities[:, 2], c='b')

  a2 = plt.subplot(1, 2, 2)
  a2.set_title(
      "Angular Velocities After Filtering \nvx [red], vy [green], vz [blue]", y=title_position)
  plt.plot(angular_velocities_filtered[:, 0], c='r')
  plt.plot(angular_velocities_filtered[:, 1], c='g')
  plt.plot(angular_velocities_filtered[:, 2], c='b')

  plt.subplots_adjust(left=0.025, right=0.975, top=0.8, bottom=0.05)

  if plt.get_backend() == 'TkAgg':
    mng = plt.get_current_fig_manager()
    max_size = mng.window.maxsize()
    max_size = (max_size[0], max_size[1] * 0.45)
    mng.resize(*max_size)
  plt.show(block=block)
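
A minimal usage sketch with N x 3 arrays of angular-velocity samples; a moving average stands in for whatever filter the pipeline actually uses:

# Illustrative data only: random angular velocities and a crudely smoothed copy.
import numpy as np
omega = np.random.randn(500, 3)
kernel = np.ones(15) / 15.0
omega_filtered = np.column_stack(
    [np.convolve(omega[:, k], kernel, mode='same') for k in range(3)])
plot_angular_velocities('Angular velocity filtering (toy data)', omega, omega_filtered)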
random_projection.py (project: DataMining, author: lidalei)
def plot_distortion(training_data_instances):
    # dimension of a training data instance
    d = training_data_instances.shape[1]
    # first m instances considered
    m = 20

    fig, axes = plt.subplots(1, 1)
    fig.suptitle("Distortion of random projection", fontsize = "x-large")

    for k in [50, 100, 500]:
        ## generate random projection matrix
        random_projection_matrix =  generate_random_projection_matrix(k, d)
        ## random projection
        m_instances = training_data_instances[0:m]
        projected_m_instances = np.dot(m_instances, np.transpose(random_projection_matrix))
        # print random_projected_matrix[0], random_projected_matrix.shape
        ## evaluate distortion - line chart
        m_instances_distortions = []
        for i in range(m):
            for j in range(i + 1, m):
                m_instances_distortions.append(euclidean(projected_m_instances[i], projected_m_instances[j]) / euclidean(m_instances[i], m_instances[j]))
        m_instances_distortions = np.array(m_instances_distortions)
        mean, std = np.mean(m_instances_distortions), np.std(m_instances_distortions)
        # line chart
        axes.plot(m_instances_distortions, label = "k=" + str(k))
        axes.plot([0, m_instances_distortions.size], [mean, mean], label = "k=" + str(k) + ", mean = " + str(round(mean, 4)))

        print "k = ", k, "distortion =", mean, "+-", std
    axes.set_xlabel("pairs of instances", fontsize = "large")
    axes.set_ylabel("distortion", fontsize = "large")
    axes.legend(loc = "center right", fontsize = "medium")
    plt.show()
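
A minimal usage sketch; generate_random_projection_matrix is defined elsewhere in the same script, so only the input matrix is synthesized here:

# Illustrative data only: 200 random instances of dimension 784 (e.g. flattened 28x28 images).
import numpy as np
training_data_instances = np.random.randn(200, 784)
plot_distortion(training_data_instances)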
DataRow.py (project: face-landmark, author: lsy17096535)
def getGitRepFolder():
#    import subprocess
#    return subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE).communicate()[0].rstrip()
    return '/home/ly/workspace/Vanilla'
DataRow.py (project: face-landmark, author: lsy17096535)
def plot(self):
        from matplotlib.pylab import show, plot, stem
        pass
DataRow.py (project: face-landmark, author: lsy17096535)
def show(self, r=3, color=255, other=None, title=None):
        M = self.drawLandmarks(r, color, other, title)
        if title is None:
            title = self.name
        # my debug
        #cv2.imshow(title, M)

        return M
experiment_1_laplace_kernel_approximation.py (project: mondrian-kernel, author: matejbalog)
def main():
    initialize_plotting()
    experiment_convergence_kernelerror()
    experiment_convergence_testerror()
    plt.show()
hack_dev.py (project: ml-projects, author: saopayne)
def hackathon_GBC_model(clf, train, features):
    clf.fit(train[features], train["Class"])
    probab_of_predict = clf.predict_proba(train[features])[:,1]
    predict_train = clf.predict(train[features])
    cv_score = cross_val_score(clf, train[features], train["Class"], cv=5, scoring="roc_auc")
    print("----------------------Model performance-----------------------")
    print("Accuracy score: ", accuracy_score(train["Class"].values, predict_train))
    print("AUC: ", roc_auc_score(train["Class"],probab_of_predict) )
    print("CV score: Mean - {}, Max - {}, Min - {}, Std - {}".format(np.mean(cv_score), np.max(cv_score),
                                                                     np.min(cv_score), np.std(cv_score)))

    Relative_Feature_importance = pd.Series(clf.feature_importances_, features).sort_values(ascending=False)
    Relative_Feature_importance.plot(kind='bar', title='Order of Feature Importance')
    plt.ylabel('Feature Importance')
    plt.show()
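
A minimal usage sketch with a synthetic DataFrame; the column names and classifier settings are illustrative, and the metric functions (cross_val_score, accuracy_score, roc_auc_score) are assumed to be imported at module level as in the original script:

# Illustrative data only: a toy binary classification table.
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
features = ['f1', 'f2', 'f3']
train = pd.DataFrame(np.random.rand(300, 3), columns=features)
train['Class'] = (train['f1'] + train['f2'] > 1.0).astype(int)
hackathon_GBC_model(GradientBoostingClassifier(), train, features)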
gpr_alpha_examples.py (project: geepee, author: thangbui)
def run_regression_1D_collapsed():
    np.random.seed(42)

    print "create dataset ..."
    Xtrain, ytrain, Xtest, ytest = create_dataset()

    alphas = [0.001, 0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 1]
    for alpha in alphas:
        M = 20
        model = vfe.SGPR_collapsed(Xtrain, ytrain, M)
        model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=1000, disp=False)
        my, vy = model.predict_y(Xtest, alpha)
        my = np.reshape(my, ytest.shape)
        vy = np.reshape(vy, ytest.shape)
        rmse = np.sqrt(np.mean((my - ytest)**2))
        ll = np.mean(-0.5 * np.log(2 * np.pi * vy) - 0.5 * (ytest - my)**2 / vy)
        nlml, _ = model.objective_function(model.get_hypers(), alpha)
        print('alpha=%.3f, train ml=%3f, test rmse=%.3f, ll=%.3f' % (alpha, nlml, rmse, ll))
        # plot(model, Xtrain, ytrain)
        # plt.show()

    # should produce something like this
    # alpha=0.001, train ml=-64.573021, test rmse=0.169, ll=0.348
    # alpha=0.100, train ml=-64.616618, test rmse=0.169, ll=0.348
    # alpha=0.200, train ml=-64.626655, test rmse=0.169, ll=0.348
    # alpha=0.300, train ml=-64.644053, test rmse=0.169, ll=0.348
    # alpha=0.500, train ml=-64.756588, test rmse=0.169, ll=0.348
    # alpha=0.700, train ml=-68.755871, test rmse=0.169, ll=0.350
    # alpha=0.800, train ml=-72.153441, test rmse=0.167, ll=0.349
    # alpha=1.000, train ml=-71.305002, test rmse=0.169, ll=0.303

