Python nanmax(): example source code

libscores.py (project: AutoML5, author: djajetic)
def normalize_array (solution, prediction):
    ''' Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. 
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted 
    classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    maxi = np.nanmax(sol[sol != float('inf')])   # max, ignoring NaN and +Inf
    mini = np.nanmin(sol[sol != float('-inf')])  # min, ignoring NaN and -Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini)/2.
    new_solution = np.copy(solution)
    new_solution[solution>=mid] = 1
    new_solution[solution<mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini))/float(diff)
    new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction<0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
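A minimal usage sketch for the function above (inputs are made up; assumes numpy is imported as np):

import numpy as np

# Hypothetical inputs: a {0, 1} solution and raw prediction scores.
solution = np.array([0., 1., 1., 0.])
prediction = np.array([0.2, 3.5, 2.1, -0.4])

new_solution, new_prediction = normalize_array(solution, prediction)
# new_solution stays {0, 1}; new_prediction is rescaled by the solution's
# min/max and clipped to [0, 1] -> [0.2, 1.0, 1.0, 0.0]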
imageutils.py (project: astrobase, author: waqasbhatti)
def logscale_img(img_array,
                 cap=255.0,
                 coeff=1000.0):
    '''
    This scales the image according to the relation:

    logscale_img = np.log(coeff*(img/max(img))+1)/np.log(coeff)

    Taken from the DS9 scaling algorithms page at:

    http://hea-www.harvard.edu/RD/ds9/ref/how.html

    According to that page:

    coeff = 1000.0 works well for optical images
    coeff = 100.0 works well for IR images

    '''

    logscaled_img = np.log(coeff*img_array/np.nanmax(img_array)+1)/np.log(coeff)
    return cap*logscaled_img
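A quick usage sketch (synthetic image; assumes numpy is imported as np):

import numpy as np

img = np.random.uniform(0.0, 1000.0, size=(64, 64))  # synthetic image data
scaled = logscale_img(img, cap=255.0, coeff=1000.0)
# np.nanmax ignores NaN pixels when finding the peak; output is roughly in [0, cap].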
episode.py (project: agent-trainer, author: lopespm)
def _learn_from_memories(self, replay_memories, q_network, global_step):
        if self._pre_learning_stage(global_step):
            loss = 0.0
            return loss

        sampled_replay_memories = replay_memories.sample(sample_size=self.hyperparameters.REPLAY_MEMORIES_TRAIN_SAMPLE_SIZE,
                                                         recent_memories_span=self.hyperparameters.REPLAY_MEMORIES_RECENT_SAMPLE_SPAN)
        consequent_states = [replay_memory.consequent_state for replay_memory in sampled_replay_memories]
        max_q_consequent_states = np.nanmax(q_network.forward_pass_batched(consequent_states), axis=1)

        train_bundles = [None] * self.hyperparameters.REPLAY_MEMORIES_TRAIN_SAMPLE_SIZE
        discount_factor = self.hyperparameters.Q_UPDATE_DISCOUNT_FACTOR
        for idx, replay_memory in enumerate(sampled_replay_memories):
            target_action_q_value = float(self._q_target(replay_memory=replay_memory,
                                                         max_q_consequent_state=max_q_consequent_states[idx],
                                                         discount_factor=discount_factor))
            train_bundles[idx] = q_network.create_train_bundle(state=replay_memory.initial_state,
                                                               action_index=replay_memory.action_index,
                                                               target_action_q_value=target_action_q_value)

        loss = q_network.train(train_bundles, global_step - self.hyperparameters.REPLAY_MEMORIES_MINIMUM_SIZE_FOR_LEARNING)
        return loss
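The np.nanmax call above reduces a batch of Q-value rows to the best action value per state. The same reduction in isolation (toy numbers; NaN marks an unavailable action):

import numpy as np

q_values = np.array([[0.1, np.nan, 0.7],    # one row per sampled consequent state
                     [0.4, 0.2, np.nan]])   # one column per action
max_q_consequent_states = np.nanmax(q_values, axis=1)  # -> array([0.7, 0.4])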
libscores.py (project: AutoML4, author: djajetic)
def normalize_array (solution, prediction):
    ''' Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. 
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted 
    classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    maxi = np.nanmax(sol[sol != float('inf')])   # max, ignoring NaN and +Inf
    mini = np.nanmin(sol[sol != float('-inf')])  # min, ignoring NaN and -Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini)/2.
    new_solution = np.copy(solution)
    new_solution[solution>=mid] = 1
    new_solution[solution<mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini))/float(diff)
    new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction<0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
libscores.py (project: automl_gpu, author: abhishekkrthakur)
def normalize_array (solution, prediction):
    ''' Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. 
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted 
    classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    maxi = np.nanmax(sol[sol != float('inf')])   # max, ignoring NaN and +Inf
    mini = np.nanmin(sol[sol != float('-inf')])  # min, ignoring NaN and -Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini)/2.
    new_solution = np.copy(solution)
    new_solution[solution>=mid] = 1
    new_solution[solution<mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini))/float(diff)
    new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction<0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
tfa.py (project: brainiak, author: brainiak)
def _get_max_sigma(self, R):
        """Calculate maximum sigma of scanner RAS coordinates

        Parameters
        ----------

        R : 2D array, with shape [n_voxel, n_dim]
            The coordinate matrix of fMRI data from one subject

        Returns
        -------

        max_sigma : float
            The maximum sigma of scanner coordinates.

        """

        max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
        return max_sigma
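The same computation with made-up coordinates, as a quick check of the formula max_sigma = 2 * (largest per-axis std)^2:

import numpy as np

R = np.random.randn(100, 3) * np.array([5.0, 2.0, 1.0])  # fake [n_voxel, n_dim] coordinates
max_sigma = 2.0 * np.nanmax(np.std(R, axis=0)) ** 2      # roughly 2 * 5.0**2 here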
util.py (project: AutoML-Challenge, author: postech-mlg-exbrain)
def sanitize_array(array):
    """
    Replace NaN and Inf (there should not be any!)
    :param array:
    :return:
    """
    a = np.ravel(array)
    #maxi = np.nanmax((filter(lambda x: x != float('inf'), a))
    #                 )  # Max except NaN and Inf
    #mini = np.nanmin((filter(lambda x: x != float('-inf'), a))
    #                 )  # Mini except NaN and Inf
    maxi = np.nanmax(a[np.isfinite(a)])
    mini = np.nanmin(a[np.isfinite(a)])
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
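A small usage sketch (illustrative values):

import numpy as np

a = np.array([1.0, np.inf, -np.inf, np.nan, 3.0])
print(sanitize_array(a))  # +inf -> 3.0, -inf -> 1.0, NaN -> midpoint 2.0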
regions.py (project: diluvian, author: aschampion)
def check_move_neighborhood(self, mask):
        """Given a mask block, check if any central voxels meet move threshold.

        Checks whether a cube one move in each direction from the mask center
        contains any probabilities greater than the move threshold.

        Parameters
        ----------
        mask : ndarray
            Block of mask probabilities, usually of the shape specified by
            the configured ``output_fov_shape``.

        Returns
        -------
        bool
        """
        ctr = np.asarray(mask.shape) // 2
        neigh_min = ctr - self.MOVE_DELTA
        neigh_max = ctr + self.MOVE_DELTA + 1
        neighborhood = mask[tuple(map(slice, neigh_min, neigh_max))]
        return np.nanmax(neighborhood) >= CONFIG.model.t_move
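The core idea, stripped of the class context (move_delta and threshold here are made-up stand-ins for self.MOVE_DELTA and CONFIG.model.t_move):

import numpy as np

def central_cube_exceeds(mask, move_delta=4, threshold=0.9):
    # Crop a cube of radius move_delta around the center voxel and test its peak value.
    ctr = np.asarray(mask.shape) // 2
    lo, hi = ctr - move_delta, ctr + move_delta + 1
    neighborhood = mask[tuple(map(slice, lo, hi))]
    return np.nanmax(neighborhood) >= threshold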
semiautoanno.py (project: semi-auto-anno, author: moberweger)
def evaluateToGT(self, Li, idxs):
        """
        Evaluate the current estimate to a ground truth
        :param Li: current estimates
        :param idxs: idxs to evaluate
        :return: mean error, max error and MD score
        """

        if not isinstance(idxs, numpy.ndarray):
            idxs = numpy.asarray(idxs)

        if self.gt3D is not None:
            gt3D_subset = self.gt3D[idxs]
            if Li.shape[0] == len(idxs):
                Li_subset = Li
            else:
                Li_subset = Li[idxs]
            dists = numpy.sqrt(numpy.square((gt3D_subset - Li_subset.reshape(gt3D_subset.shape))*self.Di_scale[idxs, None, None]).sum(axis=2))
            mean_error = numpy.mean(dists, axis=1).mean()
            max_error = numpy.max(dists)
            vals = [(numpy.nanmax(dists, axis=1) <= j).sum() / float(gt3D_subset.shape[0]) for j in range(0, 80)]
            md_score = numpy.asarray(vals).sum() / float(80.)

            return mean_error, max_error, md_score
        else:
            return 0., 0., 0.
base.py (project: MDT, author: cbclab)
def min_max(self, mask=None):
        """Get the minimum and maximum value in this data.

        If a mask is provided we get the min and max value within the given mask.

        Infinities and NaN's are ignored by this algorithm.

        Args:
            mask (ndarray): the mask, we only include elements for which the mask > 0

        Returns:
            tuple: (min, max) the minimum and maximum values
        """
        if mask is not None:
            roi = mdt.create_roi(self.data, mask)
            return np.nanmin(roi), np.nanmax(roi)
        return np.nanmin(self.data), np.nanmax(self.data)
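Without the MDT ROI helper, the masked branch boils down to NaN-aware min/max over the selected elements (mask semantics assumed: include where mask > 0):

import numpy as np

data = np.array([[1.0, np.nan], [5.0, -2.0]])
mask = np.array([[1, 0], [1, 1]])
vmin, vmax = np.nanmin(data[mask > 0]), np.nanmax(data[mask > 0])  # (-2.0, 5.0)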
test_derived_quantities.py (project: yt, author: yt-project)
def test_extrema():
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(16, nprocs = nprocs, fields = ("density",
                "velocity_x", "velocity_y", "velocity_z"))
        for sp in [ds.sphere("c", (0.25, 'unitary')), ds.r[0.5,:,:]]:
            mi, ma = sp.quantities["Extrema"]("density")
            assert_equal(mi, np.nanmin(sp["density"]))
            assert_equal(ma, np.nanmax(sp["density"]))
            dd = ds.all_data()
            mi, ma = dd.quantities["Extrema"]("density")
            assert_equal(mi, np.nanmin(dd["density"]))
            assert_equal(ma, np.nanmax(dd["density"]))
            sp = ds.sphere("max", (0.25, 'unitary'))
            assert_equal(np.any(np.isnan(sp["radial_velocity"])), False)
            mi, ma = dd.quantities["Extrema"]("radial_velocity")
            assert_equal(mi, np.nanmin(dd["radial_velocity"]))
            assert_equal(ma, np.nanmax(dd["radial_velocity"]))
profiles.py (project: MOSPAT, author: CR2MOS)
def mospat_plot_profile(f_Var, f_Height, c_Var, c_Net, c_Stn, t_ObsStationVerticalData):
    f_lat = t_ObsStationVerticalData[c_Net][c_Stn]['f_Lat']
    f_lon = t_ObsStationVerticalData[c_Net][c_Stn]['f_Lon']
    f_Elev = t_ObsStationVerticalData[c_Net][c_Stn]['f_Elevation']
    f_hmax = 5000.
    if np.nanmax(f_Height) >= f_hmax:
        i_hmax = np.where(np.array(f_Height) <= np.array(f_hmax))[0][-1]
        f_Height = f_Height[:i_hmax]
        f_Var = f_Var[:i_hmax]
    c_var_label = ConfP.mospat_config_labels(c_Var)
    c_coord = ' [lat=%0.2f lon=%0.2f elev=%0.2f]' % (f_lat, f_lon, f_Elev)
    fig, ax = plt.subplots(figsize=(ConfP.f_PFSize[0], ConfP.f_PFSize[1]))
    line = ax.plot(f_Var, f_Height / 1000., color=ConfP.c_ObsColor[0], marker=ConfP.c_ObsMarker,
                   linestyle=ConfP.c_ObsLine[0], label=c_Net)
    ax.set_title(c_Stn + c_coord)
    ax.set_ylabel('Height [km]')
    ax.set_xlabel(c_var_label)
    ax.set_ylim([0, np.nanmax(f_Height / 1000.)])
    return fig, ax, line
imageblink.py (project: CAAPR, author: Stargrazer82301)
def add_image(self, image):

        """
        This function ...
        :param image:
        :return:
        """

        # Create an animation to show the result of the source extraction step
        if self.max_frame_value is None: self.max_frame_value = np.nanmax(image)

        # Make a plot of the image
        buf = io.BytesIO()
        plotting.plot_box(image, path=buf, format="png", vmin=0.0, vmax=0.5*self.max_frame_value)
        buf.seek(0)
        im = imageio.imread(buf)
        buf.close()

        self.add_frame(im)

# -----------------------------------------------------------------
interpolation.py (project: CAAPR, author: Stargrazer82301)
def inpaint_biharmonic(frame, mask):

    """
    This function ...
    :param frame:
    :param mask:
    :return:
    """

    maximum = np.nanmax(frame)
    normalized = frame / maximum
    data = inpaint.inpaint_biharmonic(normalized, mask, multichannel=False)

    return data * maximum

# -----------------------------------------------------------------

# TODO: create a better inpainting function. OpenCV has one, but this is a terrible dependency because it's hard to
# install. Options:
#  - The below replace_nans function can be replaced by the more up to date version at:
#    https://github.com/OpenPIV/openpiv-python/blob/master/openpiv/src/lib.pyx
#    We may want to keep it in cython so that it runs faster. However, this original does not have the inverse distance
#    weighing as in the code below, but we can maybe add this ourselves in the cython code
#  - Write our own code.
# SOLUTION: SEE FUNCTION ABOVE, GENERALLY, IT IS MUCH BETTER
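A usage sketch for the wrapper above, assuming the module already does `from skimage.restoration import inpaint` (keyword arguments differ slightly between skimage versions):

import numpy as np

frame = np.random.uniform(0.0, 10.0, size=(32, 32))
mask = np.zeros(frame.shape, dtype=bool)
mask[10:14, 10:14] = True      # pixels to repair
frame[mask] = 0.0

filled = inpaint_biharmonic(frame, mask)
# The frame is divided by its np.nanmax before inpainting, then rescaled back.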
nan_maximum_filter.py (project: imgProcessor, author: radjkarl)
def _calc(arr, out, ksize):
    gx = arr.shape[0]
    gy = arr.shape[1]
    for i in range(gx):
        for j in range(gy):   

            xmn = i-ksize
            if xmn < 0:
                xmn = 0
            xmx = i+ksize
            if xmx > gx:
                xmx = gx

            ymn = j-ksize
            if ymn < 0:
                ymn = 0
            ymx = j+ksize
            if ymx > gy:
                ymx = gy

            out[i,j] = np.nanmax(arr[xmn:xmx,ymn:ymx])
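As shown here the loops are plain Python, so a toy array can be filtered directly (NaN cells are skipped by np.nanmax; note the window upper bound i+ksize is exclusive):

import numpy as np

arr = np.array([[1.0, 2.0, np.nan],
                [4.0, 0.5, 3.0]])
out = np.empty_like(arr)
_calc(arr, out, 1)
print(out)  # each cell holds the NaN-ignoring max of its clipped local window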
image_utils_ocl.py (project: vsi_common, author: VisionSystemsInc)
def local_entropy(ocl_ctx, img, window_radius, num_bins=8):
  """ compute local entropy using a sliding window """
  mf = cl.mem_flags
  cl_queue = cl.CommandQueue(ocl_ctx)
  img_np = np.array(img).astype(np.float32)
  img_buf = cl.Buffer(ocl_ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=img_np)
  min_val = np.nanmin(img)
  max_val = np.nanmax(img)
  entropy = np.zeros_like(img,dtype=np.float32)
  dest_buf = cl.Buffer(ocl_ctx, mf.WRITE_ONLY, entropy.nbytes)
  cl_dir = os.path.dirname(__file__)
  cl_filename = cl_dir + '/cl/local_entropy.cl'
  with open(cl_filename, 'r') as fd:
    clstr = fd.read()
  prg = cl.Program(ocl_ctx, clstr).build()
  prg.local_entropy(cl_queue, entropy.shape, None,
                    img_buf, dest_buf,
                    np.int32(img.shape[1]), np.int32(img.shape[0]),
                    np.int32(window_radius), np.int32(num_bins),
                    np.float32(min_val), np.float32(max_val))

  cl.enqueue_copy(cl_queue, entropy, dest_buf)
  cl_queue.finish()

  return entropy
estimator.py (project: scikit-gstat, author: mmaelicke)
def minmax(X):
    """
    Returns the MinMax Semivariance of sample X.
    X has to be an even-length array of point pairs like: x1, x1+h, x2, x2+h, ..., xn, xn+h.

    :param X:
    :return:
    """
    _X = np.asarray(X)

    if any([isinstance(_, list) or isinstance(_, np.ndarray) for _ in _X]):
        return [minmax(_) for _ in _X]

    # check even
    if len(_X) % 2 > 0:
        raise ValueError('The sample does not have an even length: {}'.format(_X))

    return (np.nanmax(_X) - np.nanmin(_X)) / np.nanmean(_X)
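A quick check with a hand-computable sample:

import numpy as np

X = np.array([1.0, 3.0, 2.0, 6.0])   # point pairs x1, x1+h, x2, x2+h
print(minmax(X))  # (nanmax - nanmin) / nanmean = (6 - 1) / 3 = 1.666...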
test_table_formatters.py (project: PyBloqs, author: manahl)
def test_FmtHeatmap__get_min_max_from_selected_cell_values_with_cache():
    df_pn = df - 5.
    cache = {}
    fmt = pbtf.FmtHeatmap(cache=cache)
    res = fmt._get_min_max_from_selected_cell_values(None, None, df_pn)
    assert len(cache) == 1 and (None, None) in cache.keys()
    assert res == (np.nanmin(df_pn), np.nanmax(df_pn))

    min_value, max_value = np.nanmin(df.loc[['a'], ['aa', 'bb']]), np.nanmax(df.loc[['a'], ['aa', 'bb']])
    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert len(cache) == 2 and (frozenset(['a']), frozenset(['aa', 'bb'])) in cache.keys()
    assert res == (min_value, max_value)

    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert len(cache) == 2 and (frozenset(['a']), frozenset(['aa', 'bb'])) in cache.keys()
    assert res == (min_value, max_value)
test_table_formatters.py (project: PyBloqs, author: manahl)
def test_FmtHeatmap__get_min_max_from_selected_cell_values_without_cache():
    df_pn = df - 5.
    cache = None
    fmt = pbtf.FmtHeatmap(cache=cache)
    res = fmt._get_min_max_from_selected_cell_values(None, None, df_pn)
    assert cache is None
    assert res == (np.nanmin(df_pn), np.nanmax(df_pn))

    min_value, max_value = np.nanmin(df.loc[['a'], ['aa', 'bb']]), np.nanmax(df.loc[['a'], ['aa', 'bb']])
    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert cache is None
    assert res == (min_value, max_value)

    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert cache is None
    assert res == (min_value, max_value)
SkewTobj.py (project: PyGEOMET, author: pygeomet)
def MostUnstable(self, t, p, q, td):

        #Determine psfc - 300mb
        minP = p[0] - 300.
        diff = p - minP
        ind = np.where(diff > 0)

        #Determine max theta-e
        vp = self.VaporPressure(td[ind])
        t_lcl = self.TempLCL(t[ind]+273.15,td[ind])
        thetae = self.theta[ind] * np.exp((3.376/t_lcl - 0.00254)*1000*q[ind]*(1. + 0.81 * q[ind]))
        #thetae = self.theta[ind] * np.exp((3036/t_lcl - 1.78)*(q[ind]/1000.)*(1. + (0.448/1000.) * q[ind]))
        indmax = np.where(thetae == np.nanmax(thetae))

        #Define parcel        
        self.t_parcel = t[indmax]
        self.q_parcel = q[indmax]
        self.theta_parcel = self.theta[indmax]
        self.td_parcel = td[indmax]
        self.p_parcel = p[0]

    #Lifted Parcel Temperature
image_to_world.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
def depth_callback(self,data):
        try:
            self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
        except CvBridgeError as e:
            print(e)
        # print "depth"

        depth_min = np.nanmin(self.depth_image)
        depth_max = np.nanmax(self.depth_image)


        depth_img = self.depth_image.copy()
        depth_img[np.isnan(self.depth_image)] = depth_min
        depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
        cv2.imshow("Depth Image", depth_img)
        cv2.waitKey(5)
        # stream = open("/home/chentao/depth_test.yaml", "w")
        # data = {'img':depth_img.tolist()}
        # yaml.dump(data, stream)
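The normalization step, detached from ROS and OpenCV (illustrative depth values):

import numpy as np

depth = np.array([[0.5, np.nan], [1.5, 2.5]], dtype=np.float32)
depth_min, depth_max = np.nanmin(depth), np.nanmax(depth)

vis = depth.copy()
vis[np.isnan(vis)] = depth_min   # treat missing depth as the nearest value
vis = ((vis - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)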
grasp_pos_rgbd_cluster.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
def depth_callback(self,data):
        try:
            self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
        except CvBridgeError as e:
            print(e)
        # print "depth"

        depth_min = np.nanmin(self.depth_image)
        depth_max = np.nanmax(self.depth_image)


        depth_img = self.depth_image.copy()
        depth_img[np.isnan(self.depth_image)] = depth_min
        depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
        cv2.imshow("Depth Image", depth_img)
        cv2.waitKey(5)
        # stream = open("/home/chentao/depth_test.yaml", "w")
        # data = {'img':depth_img.tolist()}
        # yaml.dump(data, stream)
grasp_pos.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
def depth_callback(self,data):
        try:
            self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
        except CvBridgeError as e:
            print(e)
        # print "depth"

        depth_min = np.nanmin(self.depth_image)
        depth_max = np.nanmax(self.depth_image)


        depth_img = self.depth_image.copy()
        depth_img[np.isnan(self.depth_image)] = depth_min
        depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
        cv2.imshow("Depth Image", depth_img)
        cv2.waitKey(5)
rasterizer.py (project: wrfxpy, author: openwfm)
def basemap_raster_mercator(lon, lat, grid, cmin, cmax, cmap_name):

    # longitude/latitude extent
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))

    # construct spherical mercator projection for region of interest
    m = Basemap(projection='merc',llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0],urcrnrlon=lons[1])

    #vmin,vmax = np.nanmin(grid),np.nanmax(grid)
    masked_grid = np.ma.array(grid,mask=np.isnan(grid))
    fig = plt.figure(frameon=False,figsize=(12,8),dpi=72)
    plt.axis('off')
    cmap = mpl.cm.get_cmap(cmap_name)
    m.pcolormesh(lon,lat,masked_grid,latlon=True,cmap=cmap,vmin=cmin,vmax=cmax)

    str_io = StringIO.StringIO()
    plt.savefig(str_io,bbox_inches='tight',format='png',pad_inches=0,transparent=True)
    plt.close()

    numpy_bounds = [ (lons[0],lats[0]),(lons[1],lats[0]),(lons[1],lats[1]),(lons[0],lats[1]) ]
    float_bounds = [ (float(x), float(y)) for x,y in numpy_bounds ]
    return str_io.getvalue(), float_bounds
rasterizer.py (project: wrfxpy, author: openwfm)
def basemap_barbs_mercator(u,v,lat,lon):

    # lon/lat extents
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))

    # construct spherical mercator projection for region of interest
    m = Basemap(projection='merc',llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0],urcrnrlon=lons[1])

    #vmin,vmax = np.nanmin(grid),np.nanmax(grid)
    fig = plt.figure(frameon=False,figsize=(12,8),dpi=72*4)
    plt.axis('off')
    m.quiver(lon,lat,u,v,latlon=True)

    str_io = StringIO.StringIO()
    plt.savefig(str_io,bbox_inches='tight',format='png',pad_inches=0,transparent=True)
    plt.close()

    numpy_bounds = [ (lons[0],lats[0]),(lons[1],lats[0]),(lons[1],lats[1]),(lons[0],lats[1]) ]
    float_bounds = [ (float(x), float(y)) for x,y in numpy_bounds ]
    return str_io.getvalue(), float_bounds
zonalstats.py (project: wradlib, author: wradlib)
def _check_vals(self, vals):
        """TODO Basic check of target elements (sequence of polygons).

        """
        if self.zdata is not None:
            lyr = self.zdata.src.ds.GetLayerByName('src')
            lyr.ResetReading()
            lyr.SetSpatialFilter(None)
            src_len = lyr.GetFeatureCount()
            assert len(vals) == src_len, \
                "Argument vals must be of length %d" % src_len
        else:
            imax = 0
            for i in self.ix:
                mx = np.nanmax(i)
                if imax < mx:
                    imax = mx
            assert len(vals) > imax, \
                "Argument vals cannot be subscripted by given index values"

        return vals
test_covariance.py (project: kite, author: pyrocko)
def _test_covariance_visual(self):
        cov = self.sc.covariance
        cov.epsilon = .02
        cov.subsampling = 10
        # l = self.sc.quadtree.leaves[0]
        d = []
        d.append(('Full', cov._calcCovarianceMatrix(method='full',
                 nthreads=0)))
        d.append(('Focal', cov._calcCovarianceMatrix(method='focal')))

        fig, _ = plt.subplots(1, len(d))
        for i, (title, mat) in enumerate(d):
            print('%s Max %f' % (title, num.nanmax(mat)), mat.shape)
            fig.axes[i].imshow(mat)
            fig.axes[i].set_title(title)
        plt.show()
multiplot.py (project: kite, author: pyrocko)
def setSymColormap(self):
        cmap = {'ticks':
                [[0, (106, 0, 31, 255)],
                 [.5, (255, 255, 255, 255)],
                 [1., (8, 54, 104, 255)]],
                'mode': 'rgb'}
        cmap = {'ticks':
                [[0, (172, 56, 56)],
                 [.5, (255, 255, 255)],
                 [1., (51, 53, 120)]],
                'mode': 'rgb'}

        lvl_min = lvl_max = 0
        for plot in self.plots:
            plt_min = num.nanmin(plot.data)
            plt_max = num.nanmax(plot.data)
            lvl_max = lvl_max if plt_max < lvl_max else plt_max
            lvl_min = lvl_min if plt_min > lvl_min else plt_min

        abs_range = max(abs(lvl_min), abs(lvl_max))

        self.gradient.restoreState(cmap)
        self.setLevels(-abs_range, abs_range)
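The symmetric-range logic by itself, with plain NumPy and no pyqtgraph (function name is illustrative):

import numpy as np

def symmetric_levels(arrays):
    # Color levels symmetric about zero, ignoring NaNs in every plotted array.
    lvl_min = min(np.nanmin(a) for a in arrays)
    lvl_max = max(np.nanmax(a) for a in arrays)
    abs_range = max(abs(lvl_min), abs(lvl_max))
    return -abs_range, abs_range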

