Python float() usage examples (source code)

uwb_tracker_node.py — project: uwb_tracker_ros, author: eth-ait
def _compute_process_and_covariance_matrices(self, dt):
        """Computes the transition and covariance matrix of the process model and measurement model.

        Args:
             dt (float): Timestep of the discrete transition.

        Returns:
            F (numpy.ndarray): Transition matrix.
            Q (numpy.ndarray): Process covariance matrix.
            R (numpy.ndarray): Measurement covariance matrix.
        """
        F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)], [np.zeros((3, 3)), np.eye(3)]]))
        self.process_matrix = F
        q_p = self.process_covariance_position
        q_v = self.process_covariance_velocity
        Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
        r = self.measurement_covariance
        R = r * np.eye(4)
        self.process_covariance = Q
        self.measurement_covariance = R
        return F, Q, R
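A minimal standalone sketch of how these matrices might feed a constant-velocity predict step. The timestep, noise parameters, and the [position, velocity] state layout below are illustrative assumptions rather than values from uwb_tracker_ros, and np.block is used here in place of np.bmat.

import numpy as np

dt = 0.05                      # assumed timestep in seconds
q_p, q_v, r = 0.1, 0.5, 0.2    # assumed noise parameters

F = np.block([[np.eye(3), dt * np.eye(3)],
              [np.zeros((3, 3)), np.eye(3)]])
Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
R = r * np.eye(4)

# One prediction step for state x = [x, y, z, vx, vy, vz] and covariance P.
x = np.zeros(6)
P = np.eye(6)
x_pred = F.dot(x)
P_pred = F.dot(P).dot(F.T) + Q
print(x_pred.shape, P_pred.shape, R.shape)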
rasterfairy.py — project: RasterFairy, author: Quasimondo
def rasterMaskToGrid( rasterMask ):
    grid = []
    mask = rasterMask['mask']
    for y in range(rasterMask['height']):
        for x in range(rasterMask['width']):
            if mask[y,x]==0:
                grid.append([x,y])

    grid = np.array(grid,dtype=np.float)
    if not (rasterMask is None) and rasterMask['hex'] is True:
        f = math.sqrt(3.0)/2.0 
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for i in range(len(grid)):
            if (grid[i][1]%2.0==0.0):
                grid[i][0]-=offset
            grid[i][1] *= f
    return grid
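A small self-contained sketch of the core idea: the zero cells of a binary mask become a float coordinate grid. The 3x4 mask is made up for illustration, and plain float is used as the dtype because recent NumPy releases have removed the np.float alias.

import numpy as np

mask = np.array([[1, 0, 0, 1],
                 [0, 0, 1, 1],
                 [1, 0, 0, 0]])

ys, xs = np.nonzero(mask == 0)                  # cells that belong to the grid
grid = np.column_stack((xs, ys)).astype(float)  # (x, y) pairs as floats
print(grid)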
rasterfairy.py — project: RasterFairy, author: Quasimondo
def getBestCircularMatch(n):
    bestc = n*2
    bestr = 0
    bestrp = 0.0

    minr = int(math.sqrt(n / math.pi))
    for rp in range(0,10):
        rpf = float(rp)/10.0
        for r in range(minr,minr+3):
            rlim = (r+rpf)*(r+rpf)
            c = 0
            for y in range(-r,r+1):
                yy = y*y
                for x in range(-r,r+1):
                    if x*x+yy<rlim:
                        c+=1
            if c == n:
                return r,rpf,c

            if c>n and c < bestc:
                bestrp = rpf
                bestr = r
                bestc = c
    return bestr,bestrp,bestc
util.py — project: table-compositor, author: InvestmentSystems
def df_type_to_str(i):
    '''
    Convert into simple datatypes from pandas/numpy types
    '''
    if isinstance(i, np.bool_):
        return bool(i)
    if isinstance(i, np.int_):
        return int(i)
    if isinstance(i, np.float):
        if np.isnan(i):
            return 'NaN'
        elif np.isinf(i):
            return str(i)
        return float(i)
    if isinstance(i, np.uint):
        return int(i)
    if type(i) == bytes:
        return i.decode('UTF-8')
    if isinstance(i, (tuple, list)):
        return str(i)
    if i is pd.NaT:  # not identified as a float null
        return 'NaN'
    return str(i)
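A hedged usage sketch of the same kind of conversion on pandas/NumPy scalars. The values below are invented, and the abstract np.integer/np.floating bases replace the np.int/np.float aliases, which are deprecated or removed in current NumPy.

import numpy as np
import pandas as pd

def to_simple(i):
    # Same idea as df_type_to_str, written against current NumPy dtypes.
    if isinstance(i, np.bool_):
        return bool(i)
    if isinstance(i, np.integer):
        return int(i)
    if isinstance(i, np.floating):
        if np.isnan(i):
            return 'NaN'
        if np.isinf(i):
            return str(i)
        return float(i)
    if isinstance(i, bytes):
        return i.decode('UTF-8')
    if isinstance(i, (tuple, list)):
        return str(i)
    if i is pd.NaT:
        return 'NaN'
    return str(i)

print([to_simple(v) for v in (np.float64(1.5), np.int64(3), np.float64('nan'), b'abc')])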
summary_manager.py — project: cellranger, author: 10XGenomics
def add(self, x):
        x = float(x)
        n1 = self.count

        self.count += 1

        # Check for None first so an uninitialized bound is never compared against x.
        if self.min is None or x < self.min:
            self.min = x

        if self.max is None or x > self.max:
            self.max = x

        delta = x - self.M1
        delta_n = delta / self.count
        delta_n2 = delta_n * delta_n
        term = delta * delta_n * n1
        self.M1 += delta_n
        self.M4 += term * delta_n2 * \
                ( self.count * self.count - 3*self.count + 3 ) + \
                6 * delta_n2 * self.M2 - 4 * delta_n * self.M3
        self.M3 += term * delta_n * (self.count - 2) - 3 * delta_n * self.M2
        self.M2 += term

        return
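A self-contained sketch of the same running-moment update (the standard online mean/variance recurrence), keeping only the mean and second-moment terms; the class name and the sample values are assumptions for illustration.

class RunningStats(object):
    def __init__(self):
        self.count = 0
        self.min = None
        self.max = None
        self.M1 = 0.0   # running mean
        self.M2 = 0.0   # sum of squared deviations from the mean

    def add(self, x):
        x = float(x)
        n1 = self.count
        self.count += 1
        if self.min is None or x < self.min:
            self.min = x
        if self.max is None or x > self.max:
            self.max = x
        delta = x - self.M1
        delta_n = delta / self.count
        self.M1 += delta_n
        self.M2 += delta * delta_n * n1

    def mean(self):
        return self.M1

    def variance(self):
        return self.M2 / (self.count - 1) if self.count > 1 else 0.0

stats = RunningStats()
for v in (1.0, 2.0, 4.0, 8.0):
    stats.add(v)
print(stats.mean(), stats.variance(), stats.min, stats.max)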
coverage.py — project: cellranger, author: 10XGenomics
def get_depth_info_json(info):
    fixed_info = {int(x): y for (x, y) in info.iteritems()}

    total_depth_counts = sum(fixed_info.values())
    median_depth = None
    sorted_depths = sorted(fixed_info.keys())
    seen_depth_count = 0
    mean_depth = 0.0
    for depth in sorted_depths:
        seen_depth_count += fixed_info[depth]
        mean_depth += float(depth*fixed_info[depth])/float(total_depth_counts)
        if seen_depth_count > total_depth_counts/2 and median_depth is None:
            median_depth = depth
    zero_cov_fract = tk_stats.robust_divide(float(fixed_info.get(0, 0.0)), float(total_depth_counts))

    return (mean_depth, median_depth, zero_cov_fract)
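A runnable sketch of the same summary computed from an invented depth histogram mapping depth to count; tk_stats.robust_divide is replaced here by an explicit guard against a zero total.

depth_counts = {0: 5, 1: 10, 2: 20, 3: 5}  # assumed histogram: depth -> count

total = sum(depth_counts.values())
mean_depth = sum(d * c for d, c in depth_counts.items()) / float(total)

median_depth = None
seen = 0
for depth in sorted(depth_counts):
    seen += depth_counts[depth]
    if seen > total / 2 and median_depth is None:
        median_depth = depth

zero_cov_fract = float(depth_counts.get(0, 0)) / total if total else float('nan')
print(mean_depth, median_depth, zero_cov_fract)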
LR.py — project: MachineLearningProjects, author: geallen
def computeStep(X, y, theta):
    '''YOUR CODE HERE'''
    function_result = np.array([0,0], dtype= np.float)
    m = float(len(X))

    d1 = 0.0
    d2 = 0.0
    for i in range(len(X)):
        h1 = np.dot(theta.transpose(), X[i])
        c1 = h1 - y[i]
        d1 = d1 + c1
    j1 = d1/m
    for u in range(len(X)):
        h2 = np.dot(theta.transpose(), X[u])
        c2 = (h2 - y[u]) * X[u][1]
        d2 = d2 + c2
    j2 = d2/m

    function_result[0] = j1
    function_result[1] = j2
    return function_result
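A hedged sketch of the same batch-gradient computation on a tiny made-up dataset; X carries a leading column of ones for theta0, and the vectorized form replaces the two explicit loops.

import numpy as np

X = np.array([[1.0, 1.0],
              [1.0, 2.0],
              [1.0, 3.0]])           # first column of ones, second a feature
y = np.array([2.0, 2.5, 3.5])
theta = np.zeros(2)
m = float(len(X))

errors = X.dot(theta) - y            # h(x_i) - y_i for every sample
grad = X.T.dot(errors) / m           # the two values computeStep builds element-wise
print(grad)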



# Part 4: Implement the cost function calculation
LR.py — project: MachineLearningProjects, author: geallen
def computeCost(X, y, theta):
    '''YOUR CODE HERE'''
    m = float(len(X))

    d = 0
    for i in range(len(X)):
        h = np.dot(theta.transpose(), X[i])
        c = (h - y[i])

        c = (c **2)
        d = (d + c)
    j = (1.0 / (2 * m)) * d
    return j


# Part 5: Prepare the data so that the input X has two columns: first a column of ones to accomodate theta0 and then a column of city population data
coco.py — project: HandDetection, author: YunqiuXu
def _coco_results_one_category(self, boxes, cat_id):
    results = []
    for im_ind, index in enumerate(self.image_index):
      dets = boxes[im_ind].astype(np.float)
      if dets.size == 0:
        continue
      scores = dets[:, -1]
      xs = dets[:, 0]
      ys = dets[:, 1]
      ws = dets[:, 2] - xs + 1
      hs = dets[:, 3] - ys + 1
      results.extend(
        [{'image_id': index,
          'category_id': cat_id,
          'bbox': [xs[k], ys[k], ws[k], hs[k]],
          'score': scores[k]} for k in range(dets.shape[0])])
    return results
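A standalone sketch of the detection-to-dict conversion for a single image; the 5-column dets array (x1, y1, x2, y2, score) and the image/category ids are fabricated for illustration.

import numpy as np

dets = np.array([[10.0, 20.0, 50.0, 80.0, 0.9],
                 [15.0, 25.0, 40.0, 60.0, 0.4]], dtype=np.float64)
image_id, cat_id = 42, 1  # assumed identifiers

scores = dets[:, -1]
xs, ys = dets[:, 0], dets[:, 1]
ws = dets[:, 2] - xs + 1
hs = dets[:, 3] - ys + 1
results = [{'image_id': image_id,
            'category_id': cat_id,
            'bbox': [xs[k], ys[k], ws[k], hs[k]],
            'score': scores[k]} for k in range(dets.shape[0])]
print(results[0])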
pointclouds.py — project: pybot, author: spillai
def get_xyz_points(cloud_array, remove_nans=True):
    '''
    Pulls out x, y, and z columns from the cloud recordarray, and returns a 3xN matrix.
    '''
    # remove crap points
    if remove_nans:
        mask = np.isfinite(cloud_array['x']) & np.isfinite(cloud_array['y']) & np.isfinite(cloud_array['z'])
        cloud_array = cloud_array[mask]

    # pull out x, y, and z values
    points = np.zeros(list(cloud_array.shape) + [3], dtype=np.float)
    points[...,0] = cloud_array['x']
    points[...,1] = cloud_array['y']
    points[...,2] = cloud_array['z']

    return points
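A runnable sketch with a small structured array standing in for the point-cloud record array; the NaN entry shows what the finite-mask filtering removes, and np.float64 replaces the deprecated np.float alias.

import numpy as np

cloud = np.zeros(3, dtype=[('x', np.float32), ('y', np.float32), ('z', np.float32)])
cloud['x'] = [1.0, np.nan, 3.0]
cloud['y'] = [4.0, 5.0, 6.0]
cloud['z'] = [7.0, 8.0, 9.0]

mask = np.isfinite(cloud['x']) & np.isfinite(cloud['y']) & np.isfinite(cloud['z'])
cloud = cloud[mask]

points = np.zeros(list(cloud.shape) + [3], dtype=np.float64)
points[..., 0] = cloud['x']
points[..., 1] = cloud['y']
points[..., 2] = cloud['z']
print(points)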
paint_style.py — project: Neural_Artistic_Style, author: everfor
def parse_arguments():
    parser = argparse.ArgumentParser()

    parser.add_argument('-c', '--content', dest = 'content', help = 'Input content image', required = True)
    parser.add_argument('-s', '--styles', dest = 'styles', nargs = '+', help = 'Style image(s)', required = True)
    parser.add_argument('-o', '--output', dest = 'output', help = 'Output image', default = _default_output)
    parser.add_argument('--vgg', dest = 'vgg', help = 'Path to pretrained vgg19 network', default = _default_vgg)
    parser.add_argument('--content-weight', type = float, dest = 'content_weight', help = 'Weight for content (input) image', default = _default_content_weight)
    parser.add_argument('--style-weight', type = float, dest = 'style_weight', help = 'Weight for style image', default = _default_style_weight)
    parser.add_argument('--style-merge-weight', type = float, dest = 'style_merge_weight', nargs = '+', help = 'Weights for style merges', default = None)
    parser.add_argument('--check-per-iteration', type = int, dest = 'check_per_iteration', help = 'Frequency of checking current loss', default = _default_check_per_iteration)
    parser.add_argument('-a', '--learning-rate', type = float, dest = 'learning_rate', help = 'Learning rate for neural network', default = _default_learning_rate)
    parser.add_argument('-i', '--iterations', type = int, dest = 'iterations', help = 'Max iterations', default = _default_iterations)
    parser.add_argument('--preserve-color', type = bool, dest = 'preserve_color', help = 'Preserve color scheme of original content', default = _default_preserve_color)


    return parser.parse_args()
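A minimal sketch of the float-typed argparse pattern, trimmed to two options with invented defaults; parse_args is fed an explicit list so the example runs without real command-line input.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--content-weight', type=float, dest='content_weight',
                    help='Weight for content (input) image', default=5.0)
parser.add_argument('-a', '--learning-rate', type=float, dest='learning_rate',
                    help='Learning rate', default=1e1)

args = parser.parse_args(['--content-weight', '7.5'])
print(args.content_weight, args.learning_rate)   # 7.5 10.0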
utils.py — project: DeepWorks, author: daigo0927
def combine_images(generated_images):

    total, height, width, ch = generated_images.shape  # batch assumed to be (N, height, width, channels)
    cols = int(math.sqrt(total))
    rows = math.ceil(float(total)/cols)

    combined_image = np.zeros((height*rows, width*cols, 3),
                              dtype = generated_images.dtype)

    for index, image in enumerate(generated_images):
        i = int(index/cols)
        j = index % cols
        combined_image[height*i:height*(i+1), width*j:width*(j+1), :]\
            = image

    return combined_image
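A self-contained sketch of the tiling step on a random batch; the batch shape is assumed to be (N, height, width, channels), which is why the row axis is indexed by height and the column axis by width.

import math
import numpy as np

batch = np.random.rand(7, 8, 10, 3)          # 7 images of height 8, width 10
total, height, width, ch = batch.shape
cols = int(math.sqrt(total))
rows = int(math.ceil(float(total) / cols))

canvas = np.zeros((height * rows, width * cols, ch), dtype=batch.dtype)
for index, image in enumerate(batch):
    i, j = divmod(index, cols)
    canvas[height * i:height * (i + 1), width * j:width * (j + 1), :] = image
print(canvas.shape)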
utils.py — project: DeepWorks, author: daigo0927
def get_image(filepath, image_target, image_size):

    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target)/2)    
    w_drop = int((w_origin - image_target)/2)

    if img.ndim == 2:
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1,1,3))

    img_crop = img[h_drop:h_drop+image_target, w_drop:w_drop+image_target, :]

    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize)/127.5 - 1.
helper_functions.py — project: slim-python, author: ustunb
def easy_type(data_value):
    type_name = type(data_value).__name__
    if type_name in {"list", "set"}:
        types = {easy_type(item) for item in data_value}
        if len(types) == 1:
            return next(iter(types))
        elif types.issubset({"int", "float"}):
            return "float"
        else:
            return "multiple"
    elif type_name == "str":
        if data_value in {'True', 'TRUE'}:
            return "bool"
        elif data_value in {'False', 'FALSE'}:
            return "bool"
        else:
            return "str"
    elif type_name == "int":
        return "int"
    elif type_name == "float":
        return "float"
    elif type_name == "bool":
        return "bool"
    else:
        return "unknown"
utils.py — project: CausalGAN, author: mkocaoglu
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding

            grid[h:h+h_width, w:w+w_width] = tensor[k]
            k = k + 1
    return grid
test_nestio.py — project: NeoAnalysis, author: neoanalysis
def test_read_float(self):
        """
        Tests if spike times are stored as floats if they
        are stored as floats in the file.
        """
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        st = r.read_spiketrain(gdf_id=1, t_start=400. * pq.ms,
                               t_stop=500. * pq.ms,
                               lazy=False, id_column=0, time_column=1)
        self.assertTrue(st.magnitude.dtype == np.float)
        seg = r.read_segment(gid_list=[1], t_start=400. * pq.ms,
                             t_stop=500. * pq.ms,
                             lazy=False, id_column_gdf=0, time_column_gdf=1)
        sts = seg.spiketrains
        self.assertTrue(all([s.magnitude.dtype == np.float for s in sts]))
spiketrain.py — project: NeoAnalysis, author: neoanalysis
def __init__(self, times, t_stop, units=None,  dtype=np.float,
                 copy=True, sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s,
                 waveforms=None, left_sweep=None, name=None, file_origin=None,
                 description=None, **annotations):
        '''
        Initializes a newly constructed :class:`SpikeTrain` instance.
        '''
        # This method is only called when constructing a new SpikeTrain,
        # not when slicing or viewing. We use the same call signature
        # as __new__ for documentation purposes. Anything not in the call
        # signature is stored in annotations.

        # Calls parent __init__, which grabs universally recommended
        # attributes and sets up self.annotations
        BaseNeo.__init__(self, name=name, file_origin=file_origin,
                         description=description, **annotations)
test_nestio.py — project: NeoAnalysis, author: neoanalysis
def test_read_float(self):
        """
        Tests if spike times are stored as floats if they
        are stored as floats in the file.
        """
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        st = r.read_spiketrain(gdf_id=1, t_start=400. * pq.ms,
                               t_stop=500. * pq.ms,
                               lazy=False, id_column=0, time_column=1)
        self.assertTrue(st.magnitude.dtype == np.float)
        seg = r.read_segment(gid_list=[1], t_start=400. * pq.ms,
                             t_stop=500. * pq.ms,
                             lazy=False, id_column_gdf=0, time_column_gdf=1)
        sts = seg.spiketrains
        self.assertTrue(all([s.magnitude.dtype == np.float for s in sts]))
test_object_analyzer.py — project: systematic-metafeatures, author: fhpinto
def test__replace_objects_by_integers(self):
        data = pd.DataFrame([{'column1': 1.3, 'column2': "Bla"},
                             {'column1': 3.2, 'column2': "Bla"},
                             {'column1': 2.7, 'column2': "Aha"}])
        data = metafeatures.core.object_analyzer \
            ._replace_objects_by_integers(data,
                                          {0: {'type': 'numerical',
                                               'name': 'column1',
                                               'is_target': False},
                                           1: {'type': 'categorical',
                                               'name': 'column2',
                                               'is_target': False}})
        print(data)
        self.assertEqual(data.dtypes[0], np.float)
        self.assertEqual(data.dtypes[1], np.float)
        np.testing.assert_allclose(data.iloc[:, 1], [0, 0, 1])
configuration_space.py — project: ConfigSpace, author: automl
def __getitem__(self, item: str) -> Any:
        if self._query_values or item in self._values:
            return self._values.get(item)

        hyperparameter = self.configuration_space._hyperparameters[item]
        item_idx = self.configuration_space._hyperparameter_idx[item]

        if not np.isfinite(self._vector[item_idx]):
            raise KeyError()

        value = hyperparameter._transform(self._vector[item_idx])
        # Truncate the representation of the float to be of constant
        # length for a python version
        if isinstance(hyperparameter, FloatHyperparameter):
            value = float(repr(value))
        # TODO make everything faster, then it'll be possible to init all values
        # at the same time and use an OrderedDict instead of only a dict here to
        # support iterating that dict in the same order as the actual order of
        # hyperparameters
        self._values[item] = value
        return self._values[item]
pong_wrapper.py — project: pytorch-nec, author: mjacar
def step(self, action):
    screens = []
    total_reward = 0

    for t in range(4):
      screen = self.get_screen()
      screens.append(screen)
      _, reward, done, info = self.env.step(self.action_mapping[action])
      total_reward += reward
      if done or total_reward:
        if done:
          self.env.reset()
        for _ in range(20):
          self.env.step(0)
        for _ in range(3 - t):
          screens.append(screen)
        break

    screens = np.asarray(screens).astype(np.float)
    return screens, total_reward, done, info
handdetector.py — project: deep-prior, author: moberweger
def calculateCoM(self, dpt):
        """
        Calculate the center of mass
        :param dpt: depth image
        :return: (x,y,z) center of mass
        """

        dc = dpt.copy()
        dc[dc < self.minDepth] = 0
        dc[dc > self.maxDepth] = 0
        cc = ndimage.measurements.center_of_mass(dc > 0)
        num = numpy.count_nonzero(dc)
        com = numpy.array((cc[1]*num, cc[0]*num, dc.sum()), numpy.float)

        if num == 0:
            return numpy.array((0, 0, 0), numpy.float)
        else:
            return com/num
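A runnable sketch of the same center-of-mass computation on a fabricated depth patch; the minDepth/maxDepth thresholds are invented, and scipy.ndimage supplies the pixel center of mass.

import numpy as np
from scipy import ndimage

dpt = np.array([[0.0, 300.0, 310.0],
                [0.0, 305.0, 0.0],
                [900.0, 0.0, 0.0]])
min_depth, max_depth = 100.0, 800.0   # assumed valid depth range in mm

dc = dpt.copy()
dc[dc < min_depth] = 0
dc[dc > max_depth] = 0
num = np.count_nonzero(dc)
cc = ndimage.center_of_mass(dc > 0)    # (row, col) of the valid pixels
com = np.array((cc[1] * num, cc[0] * num, dc.sum()), dtype=np.float64)
print(com / num if num else np.zeros(3))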
no_numba.py — project: dc_stat_think, author: justinbois
def diff_of_means(data_1, data_2):
    """
    Difference in means of two arrays.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        np.mean(data_1) - np.mean(data_2)
    """
    data_1 = _convert_data(data_1)
    data_2 = _convert_data(data_2)

    return _diff_of_means(data_1, data_2)


# @numba.jit(nopython=True)
no_numba.py — project: dc_stat_think, author: justinbois
def _diff_of_means(data_1, data_2):
    """
    Difference in means of two arrays.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        np.mean(data_1) - np.mean(data_2)
    """
    return np.mean(data_1) - np.mean(data_2)
no_numba.py — project: dc_stat_think, author: justinbois
def swap_random(a, b):
    """
    Randomly swap entries in two arrays.

    Parameters
    ----------
    a : array_like
        1D array of entries to be swapped.
    b : array_like
        1D array of entries to be swapped. Must have the same lengths
        as `a`.

    Returns
    -------
    a_out : ndarray, dtype float
        Array with random entries swapped.
    b_out : ndarray, dtype float
        Array with random entries swapped.
    """
    a, b = _convert_two_data(a, b)

    return _swap_random(a, b)


# @numba.jit(nopython=True)
no_numba.py — project: dc_stat_think, author: justinbois
def heritability(parents, offspring):
    """
    Compute the heritability from parent and offspring samples.

    Parameters
    ----------
    parents : array_like
        Array of data for trait of parents.
    offspring : array_like
        Array of data for trait of offspring.

    Returns
    -------
    output : float
        Heritability of trait.
    """
    par, off = _convert_two_data(parents, offspring)
    covariance_matrix = np.cov(par, off)
    return covariance_matrix[0,1] / covariance_matrix[0,0]
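A runnable sketch of the covariance-ratio estimate on fabricated parent/offspring data; np.cov returns the 2x2 covariance matrix, and the off-diagonal entry divided by the parent variance gives the heritability.

import numpy as np

rng = np.random.default_rng(0)
parents = rng.normal(10.0, 2.0, size=200)
offspring = 0.6 * parents + rng.normal(4.0, 1.0, size=200)  # assumed linear relation

covariance_matrix = np.cov(parents, offspring)
heritability = covariance_matrix[0, 1] / covariance_matrix[0, 0]
print(round(heritability, 2))   # close to the 0.6 slope used above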
dc_stat_think.py — project: dc_stat_think, author: justinbois
def _draw_bs_reps_mean(data, size=1):
    """
    Generate bootstrap replicates of the mean out of `data`.

    Parameters
    ----------
    data : array_like
        One-dimensional array of data.
    size : int, default 1
        Number of bootstrap replicates to generate.

    Returns
    -------
    output : float
        Bootstrap replicates of the mean computed from `data`.
    """
    # Set up output array
    bs_reps = np.empty(size)

    # Draw replicates
    n = len(data)
    for i in range(size):
        bs_reps[i] = np.mean(np.random.choice(data, size=n))

    return bs_reps
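A usage sketch drawing bootstrap replicates of the mean from invented data; the seed and sample values are assumptions, and np.random.choice resamples with replacement exactly as in the function above.

import numpy as np

np.random.seed(42)
data = np.array([1.2, 2.3, 3.1, 4.8, 5.0, 6.7])

size = 1000
bs_reps = np.empty(size)
n = len(data)
for i in range(size):
    bs_reps[i] = np.mean(np.random.choice(data, size=n))

print(bs_reps.mean(), bs_reps.std())   # centered near np.mean(data)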
dc_stat_think.py — project: dc_stat_think, author: justinbois
def _draw_bs_reps_median(data, size=1):
    """
    Generate bootstrap replicates of the median out of `data`.

    Parameters
    ----------
    data : array_like
        One-dimensional array of data.
    size : int, default 1
        Number of bootstrap replicates to generate.

    Returns
    -------
    output : float
        Bootstrap replicates of the median computed from `data`.
    """
    # Set up output array
    bs_reps = np.empty(size)

    # Draw replicates
    n = len(data)
    for i in range(size):
        bs_reps[i] = np.median(np.random.choice(data, size=n))

    return bs_reps
dc_stat_think.py — project: dc_stat_think, author: justinbois
def _diff_of_means(data_1, data_2):
    """
    Difference in means of two arrays.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        np.mean(data_1) - np.mean(data_2)
    """
    return np.mean(data_1) - np.mean(data_2)
dc_stat_think.py — project: dc_stat_think, author: justinbois
def studentized_diff_of_means(data_1, data_2):
    """
    Studentized difference in means of two arrays.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        Studentized difference of means.

    Notes
    -----
    .. If the variance of both `data_1` and `data_2` is zero, returns
       np.nan.
    """
    data_1 = _convert_data(data_1)
    data_2 = _convert_data(data_2)

    return _studentized_diff_of_means(data_1, data_2)

