Python numpy.rint() usage examples
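
For reference, numpy.rint rounds array elements to the nearest integer, with exact halves going to the nearest even value, and it returns a floating-point array; the snippets below therefore usually chain .astype(...) to get integer indices. A minimal illustration:

import numpy as np

print(np.rint([0.4, 0.5, 1.5, 2.5, -1.5]))   # [ 0.  0.  2.  2. -2.]
print(np.rint(3.7).astype(int))              # 4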

data_transforms.py (project: dsb3, author: EliasVansteenkiste)
def make_3d_mask(img_shape, center, radius, shape='sphere'):
    mask = np.zeros(img_shape)
    radius = np.rint(radius)
    center = np.rint(center)
    sz = np.arange(int(max(center[0] - radius, 0)), int(max(min(center[0] + radius + 1, img_shape[0]), 0)))
    sy = np.arange(int(max(center[1] - radius, 0)), int(max(min(center[1] + radius + 1, img_shape[1]), 0)))
    sx = np.arange(int(max(center[2] - radius, 0)), int(max(min(center[2] + radius + 1, img_shape[2]), 0)))
    sz, sy, sx = np.meshgrid(sz, sy, sx)
    if shape == 'cube':
        mask[sz, sy, sx] = 1.
    elif shape == 'sphere':
        distance2 = ((center[0] - sz) ** 2
                     + (center[1] - sy) ** 2
                     + (center[2] - sx) ** 2)
        distance_matrix = np.ones_like(mask) * np.inf
        distance_matrix[sz, sy, sx] = distance2
        mask[(distance_matrix <= radius ** 2)] = 1
    elif shape == 'gauss':
        z, y, x = np.ogrid[:mask.shape[0], :mask.shape[1], :mask.shape[2]]
        distance = ((z - center[0]) ** 2 + (y - center[1]) ** 2 + (x - center[2]) ** 2)
        mask = np.exp(- 1. * distance / (2 * radius ** 2))
        mask[(distance > 3 * radius ** 2)] = 0
    return mask
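
A hypothetical call of make_3d_mask, only to show that center and radius are rounded with np.rint before the mask is built (shapes and values are made up):

import numpy as np

# Assumes make_3d_mask from the snippet above is in scope.
mask = make_3d_mask(img_shape=(32, 32, 32), center=(15.7, 16.2, 14.9), radius=4.3, shape='sphere')
print(mask.shape, int(mask.sum()))   # (32, 32, 32) and the voxel count of the rounded sphere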
utils.py (project: IDNNs, author: ravidziv)
def data_shuffle(data_sets_org, percent_of_train, min_test_data=80, shuffle_data=False):
    """Divided the data to train and test and shuffle it"""
    perc = lambda i, t: np.rint((i * t) / 100).astype(np.int32)
    C = type('type_C', (object,), {})
    data_sets = C()
    stop_train_index = perc(percent_of_train[0], data_sets_org.data.shape[0])
    start_test_index = stop_train_index
    if percent_of_train[0] > min_test_data:
        start_test_index = perc(min_test_data, data_sets_org.data.shape[0])
    data_sets.train = C()
    data_sets.test = C()
    if shuffle_data:
        shuffled_data, shuffled_labels = shuffle_in_unison_inplace(data_sets_org.data, data_sets_org.labels)
    else:
        shuffled_data, shuffled_labels = data_sets_org.data, data_sets_org.labels
    data_sets.train.data = shuffled_data[:stop_train_index, :]
    data_sets.train.labels = shuffled_labels[:stop_train_index, :]
    data_sets.test.data = shuffled_data[start_test_index:, :]
    data_sets.test.labels = shuffled_labels[start_test_index:, :]
    return data_sets
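
The perc helper above is where np.rint appears: it converts a percentage into a whole number of rows. In isolation, with made-up numbers:

import numpy as np

perc = lambda i, t: np.rint((i * t) / 100).astype(np.int32)
print(perc(80, 4096))   # 3277, i.e. 80% of 4096 rows rounded to the nearest row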
data_structures.py (project: yt, author: yt-project)
def get_global_startindex(self):
        """
        Return the integer starting index for each dimension at the current
        level.

        """
        if self.start_index is not None:
            return self.start_index
        if self.Parent is None:
            iLE = self.LeftEdge - self.ds.domain_left_edge
            start_index = iLE / self.dds
            return np.rint(start_index).astype('int64').ravel()
        pdx = self.Parent[0].dds
        start_index = (self.Parent[0].get_global_startindex()) + \
            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
        self.start_index = (start_index*self.ds.refine_by).astype('int64').ravel()
        return self.start_index
grid_patch.py (project: yt, author: yt-project)
def get_global_startindex(self):
        """
        Return the integer starting index for each dimension at the current
        level.

        """
        if self.start_index is not None:
            return self.start_index
        if self.Parent is None:
            left = self.LeftEdge.d - self.ds.domain_left_edge.d
            start_index = left / self.dds.d
            return np.rint(start_index).astype('int64').ravel()

        pdx = self.Parent.dds.d
        di = np.rint((self.LeftEdge.d - self.Parent.LeftEdge.d) / pdx)
        start_index = self.Parent.get_global_startindex() + di
        self.start_index = (start_index * self.ds.refine_by).astype('int64').ravel()
        return self.start_index
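
Both get_global_startindex variants use the same pattern: the offset of a grid edge divided by the cell width is an integer up to floating-point noise, and np.rint snaps it back before the int64 cast. A standalone sketch with made-up numbers:

import numpy as np

left_edge = np.array([0.25, 0.5, 0.0])   # hypothetical grid left edge
domain_left_edge = np.zeros(3)
dds = np.array([0.125, 0.125, 0.125])    # hypothetical cell widths
print(np.rint((left_edge - domain_left_edge) / dds).astype('int64'))   # [2 4 0]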
construction_data_containers.py (project: yt, author: yt-project)
def _minimal_box(self, dds):
        LL = self.left_edge.d - self.ds.domain_left_edge.d
        # Nudge in case we're on the edge
        LL += np.finfo(np.float64).eps
        LS = self.right_edge.d - self.ds.domain_left_edge.d
        LS += np.finfo(np.float64).eps
        cell_start = LL / dds  # This is the cell we're inside
        cell_end = LS / dds
        if self.level == 0:
            start_index = np.array(np.floor(cell_start), dtype="int64")
            end_index = np.array(np.ceil(cell_end), dtype="int64")
            dims = np.rint((self.ActiveDimensions * self.dds.d) / dds).astype("int64")
        else:
            # Give us one buffer
            start_index = np.rint(cell_start).astype('int64') - 1
            # How many root cells do we occupy?
            end_index = np.rint(cell_end).astype('int64')
            dims = end_index - start_index + 1
        return start_index, end_index.astype("int64"), dims.astype("int32")
amr_kdtree.py (project: yt, author: yt-project)
def check_tree(self):
        for node in self.trunk.depth_traverse():
            if node.grid == -1:
                continue
            grid = self.ds.index.grids[node.grid - self._id_offset]
            dds = grid.dds
            gle = grid.LeftEdge
            nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
            nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
            li = np.rint((nle-gle)/dds).astype('int32')
            ri = np.rint((nre-gle)/dds).astype('int32')
            dims = (ri - li).astype('int32')
            assert(np.all(grid.LeftEdge <= nle))
            assert(np.all(grid.RightEdge >= nre))
            assert(np.all(dims > 0))
            # print grid, dims, li, ri

        # Calculate the Volume
        vol = self.trunk.kd_sum_volume()
        mylog.debug('AMRKDTree volume = %e' % vol)
        self.trunk.kd_node_check()
amr_kdtree.py (project: yt, author: yt-project)
def sum_cells(self, all_cells=False):
        cells = 0
        for node in self.trunk.depth_traverse():
            if node.grid == -1:
                continue
            if not all_cells and not node.kd_is_leaf():
                continue
            grid = self.ds.index.grids[node.grid - self._id_offset]
            dds = grid.dds
            gle = grid.LeftEdge
            nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
            nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
            li = np.rint((nle-gle)/dds).astype('int32')
            ri = np.rint((nre-gle)/dds).astype('int32')
            dims = (ri - li).astype('int32')
            cells += np.prod(dims)
        return cells
agent_mjc.py (project: visual_mpc, author: febert)
def mujoco_to_imagespace(self, mujoco_coord, numpix=64, truncate=False):
        """
        convert from Mujoco coordinates to numpix x numpix image space:
        :param numpix: number of pixels of square image
        :param mujoco_coord:
        :return: pixel_coord
        """
        viewer_distance = .75  # distance from camera to the viewing plane
        window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
        pixelheight = window_height / numpix  # height of one pixel
        pixelwidth = pixelheight
        window_width = pixelwidth * numpix
        middle_pixel = numpix / 2
        pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) /
                              pixelwidth + np.array([middle_pixel, middle_pixel]))
        pixel_coord = pixel_coord.astype(int)

        return pixel_coord
correction_train_flow.py (project: lsdc, author: febert)
def mujoco_to_imagespace(mujoco_coord, numpix=64):
    """
    convert from Mujoco coordinates to numpix x numpix image space:
    :param numpix: number of pixels of square image
    :param mujoco_coord:
    :return: pixel_coord
    """
    viewer_distance = .75  # distance from camera to the viewing plane
    window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
    pixelheight = window_height / numpix  # height of one pixel
    pixelwidth = pixelheight
    window_width = pixelwidth * numpix
    middle_pixel = numpix / 2
    pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) /
                          pixelwidth + np.array([middle_pixel, middle_pixel]))
    pixel_coord = pixel_coord.astype(int)
    return pixel_coord
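
A quick sanity check of the module-level version above (assuming the function is imported as written): the Mujoco origin should land on the image centre.

import numpy as np

print(mujoco_to_imagespace(np.array([0.0, 0.0]), numpix=64))   # [32 32]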
mesh.py (project: nekpy, author: NekBox)
def get_mesh_data(self):
    import numpy as np
    letters = [chr(97+i) for i in range(26)] + [chr(65+i) for i in range(26)]
    mesh = " {:11d}  {:d} {:11d}           NEL,NDIM,NELV".format(np.prod(self.n), 3, np.prod(self.n))
    for e in range(self.elements.shape[0]):
      ix = int(np.rint((self.elements[e,0] - self.root[0])/self.delta[0]))
      iy = int(np.rint((self.elements[e,4] - self.root[1])/self.delta[1]))
      iz = int(np.rint((self.elements[e,8] - self.root[2])/self.delta[2]))
      mesh += "\n            ELEMENT {:11d} [{:5d}{:1s}]  GROUP  0\n".format(e+1, iz+1, letters[(ix+iy*self.n[0]) % 52]) 
      mesh += "  {: 13.10E}  {: 13.10E}  {: 13.10E}  {: 13.10E} \n".format(*(self.elements[e, 0: 4].tolist())) 
      mesh += "  {: 13.10E}  {: 13.10E}  {: 13.10E}  {: 13.10E} \n".format(*(self.elements[e, 4: 8].tolist())) 
      mesh += "  {: 13.10E}  {: 13.10E}  {: 13.10E}  {: 13.10E} \n".format(*(self.elements[e, 8:12].tolist())) 
      mesh += "  {: 13.10E}  {: 13.10E}  {: 13.10E}  {: 13.10E} \n".format(*(self.elements[e,12:16].tolist())) 
      mesh += "  {: 13.10E}  {: 13.10E}  {: 13.10E}  {: 13.10E} \n".format(*(self.elements[e,16:20].tolist())) 
      mesh += "  {: 13.10E}  {: 13.10E}  {: 13.10E}  {: 13.10E} ".format(*(self.elements[e,20:24].tolist())) 
    return mesh
dataset.py (project: neural-combinatorial-optimization-rl-tensorflow, author: MichelDeudon)
def visualize_2D_trip(self,trip,tw_open,tw_close):
        plt.figure(figsize=(30,30))
        rcParams.update({'font.size': 22})
        # Plot cities
        colors = ['red'] # Depot is first city
        for i in range(len(tw_open)-1):
            colors.append('blue')
        plt.scatter(trip[:,0], trip[:,1], color=colors, s=200)
        # Plot tour
        tour=np.array(list(range(len(trip))) + [0])
        X = trip[tour, 0]
        Y = trip[tour, 1]
        plt.plot(X, Y,"--", markersize=100)
        # Annotate cities with TW
        tw_open = np.rint(tw_open)
        tw_close = np.rint(tw_close)
        time_window = np.concatenate((tw_open,tw_close),axis=1)
        for tw, (x, y) in zip(time_window,(zip(X,Y))):
            plt.annotate(tw,xy=(x, y))  
        plt.xlim(0,60)
        plt.ylim(0,60)
        plt.show()


    # Heatmap of permutations (x=cities; y=steps)
plotResults.py (project: Argon, author: FracturedRocketSpace)
def corrHist(positions):
    g = np.zeros(config.histSteps);
    for p1 in range(1,config.nParticles):
        for p2 in range(p1):
            X = positions[p2,0] - positions[p1,0];
            Y = positions[p2,1] - positions[p1,1];
            Z = positions[p2,2] - positions[p1,2];

            X -= np.rint(X/config.lCalc) * config.lCalc;
            Y -= np.rint(Y/config.lCalc) * config.lCalc;
            Z -= np.rint(Z/config.lCalc) * config.lCalc;            

            distance = np.sqrt(X*X + Y*Y + Z*Z);
            for i in range(config.histSteps):
                if( (config.histRange/config.histSteps) * i < 
                    distance < (config.histRange/config.histSteps) * (i+1) ):
                        g[i] += 1 / ( 4 * np.pi * ((config.histRange/config.histSteps*i)**2) * (config.histRange/config.histSteps) );
                        break;    

    g = g * 2 * (config.lCalc**3) / (config.nParticles*(config.nParticles-1));        
    return g
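
The three lines of the form X -= np.rint(X/config.lCalc) * config.lCalc implement the minimum-image convention for a periodic box: np.rint folds a raw separation back into [-L/2, L/2]. In isolation, with a made-up box length:

import numpy as np

L = 10.0   # hypothetical box length
dx = 7.0   # raw separation along one axis
dx -= np.rint(dx / L) * L
print(dx)  # -3.0, the nearest periodic image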
unfolder_symmetry.py (project: upho, author: yuzie007)
def create_little_group(self, kpoint):
        rotations = self._symmetry_operations["rotations"]
        translations = self._symmetry_operations["translations"]
        lattice = self._cell.get_cell()

        rotations_kpoint = []
        translations_kpoint = []
        for r, t in zip(rotations, translations):
            diff = np.dot(kpoint, r) - kpoint
            diff -= np.rint(diff)
            dist = np.linalg.norm(np.dot(np.linalg.inv(lattice), diff))
            if dist < self._symprec:
                rotations_kpoint.append(r)
                translations_kpoint.append(t)

        return np.array(rotations_kpoint), np.array(translations_kpoint)
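
Here diff -= np.rint(diff) maps each fractional component of the k-point difference back into [-0.5, 0.5], i.e. into the reciprocal unit cell, before the distance test against symprec. A one-liner with made-up values:

import numpy as np

diff = np.array([0.9, -1.2, 0.5])
print(diff - np.rint(diff))   # [-0.1 -0.2  0.5]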
fast_counter_dummy.py (project: qudi, author: Ulm-IQO)
def configure(self, bin_width_s, record_length_s, number_of_gates = 0):
        """ Configuration of the fast counter.

        @param float bin_width_s: Length of a single time bin in the time trace
                                  histogram in seconds.
        @param float record_length_s: Total length of the timetrace/each single
                                      gate in seconds.
        @param int number_of_gates: optional, number of gates in the pulse
                                    sequence. Ignored for a non-gated counter.

        @return tuple(binwidth_s, gate_length_s, number_of_gates):
                    binwidth_s: float the actual set binwidth in seconds
                    gate_length_s: the actual set gate length in seconds
                    number_of_gates: the number of gates which are accepted
        """
        self._binwidth = int(np.rint(bin_width_s * 1e9 * 950 / 1000))
        self._gate_length_bins = int(np.rint(record_length_s / bin_width_s))
        actual_binwidth = self._binwidth * 1000 / 950e9
        actual_length = self._gate_length_bins * actual_binwidth
        self.statusvar = 1
        return actual_binwidth, actual_length, number_of_gates
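
The same arithmetic pulled out of the class, for a requested bin width of 1 ns (the 950/1000 factor is the clock ratio used by this dummy module; the value is illustrative):

import numpy as np

bin_width_s = 1e-9
binwidth = int(np.rint(bin_width_s * 1e9 * 950 / 1000))   # 1 clock unit
actual_binwidth = binwidth * 1000 / 950e9                 # ~1.0526e-09 s
print(binwidth, actual_binwidth)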
fast_counter_fpga_qo.py (project: qudi, author: Ulm-IQO)
def _set_dac_voltages(self):
        """
        """
        with self.threadlock:
            dac_sma_mapping = {1: 1, 2: 5, 3: 2, 4: 6, 5: 3, 6: 7, 7: 4, 8: 8}
            set_voltage_cmd = 0x03000000
            for dac_chnl in range(8):
                sma_chnl = dac_sma_mapping[dac_chnl+1]
                dac_value = int(np.rint(4096*self._switching_voltage[sma_chnl]/(2.5*2)))
                if dac_value > 4095:
                    dac_value = 4095
                elif dac_value < 0:
                    dac_value = 0
                tmp_cmd = set_voltage_cmd + (dac_chnl << 20) + (dac_value << 8)
                self._fpga.SetWireInValue(0x01, tmp_cmd)
                self._fpga.UpdateWireIns()
                self._fpga.ActivateTriggerIn(0x41, 0)
        return
aprsrecv.py (project: weakmon, author: rtmrtmrtmrtm)
def process1(self, sliced):
    global advance

    bitsamples = self.rate / float(self.baud)
    flagsamples = bitsamples * 9 # HDLC 01111110 flag (9 b/c NRZI)

    ff = self.findflag(sliced[0:int(round(flagsamples+advance*bitsamples+2))])
    if ff != None:
      indices = numpy.arange(0, len(sliced) - (ff+2*bitsamples), bitsamples)
      indices = indices + (ff + 0.5*bitsamples)
      indices = numpy.rint(indices).astype(int)
      rawsymbols = sliced[indices]
      symbols = numpy.where(rawsymbols > 0, 1, -1)

      [ ok, msg, nsymbols ] = self.finishframe(symbols[8:])
      if ok >= 1:
        return [ ok, msg, nsymbols, ff ]

    return [ 0, None, 0, 0 ]
squeeze.py (project: EvadeML-Zoo, author: mzweilin)
def opencv_wrapper(imgs, opencv_func, argv):
    ret_imgs = []
    imgs_copy = imgs

    if imgs.shape[3] == 1:
        imgs_copy = np.squeeze(imgs)

    for img in imgs_copy:
        img_uint8 = np.clip(np.rint(img * 255), 0, 255).astype(np.uint8)
        ret_img = opencv_func(*[img_uint8]+argv)
        if type(ret_img) == tuple:
            ret_img = ret_img[1]
        ret_img = ret_img.astype(np.float32) / 255.
        ret_imgs.append(ret_img)
    ret_imgs = np.stack(ret_imgs)

    if imgs.shape[3] == 1:
        ret_imgs = np.expand_dims(ret_imgs, axis=3)

    return ret_imgs
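
The line np.clip(np.rint(img * 255), 0, 255).astype(np.uint8) in opencv_wrapper is the usual float-to-uint8 quantisation; in isolation:

import numpy as np

x = np.array([0.0, 0.25, 0.5019, 1.0])   # hypothetical float image values in [0, 1]
print(np.clip(np.rint(x * 255), 0, 255).astype(np.uint8))   # [  0  64 128 255]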


# Binary filters.
stats.py (project: ugali, author: DarkEnergySurvey)
def peak_interval(data, alpha=_alpha, npoints=_npoints):
    """
    Identify interval using Gaussian kernel density estimator.
    """
    peak = kde_peak(data,npoints)
    x = np.sort(data.flat); n = len(x)
    # The number of entries in the interval
    window = int(np.rint((1.0-alpha)*n))
    # The start, stop, and width of all possible intervals
    starts = x[:n-window]; ends = x[window:]
    widths = ends - starts
    # Just the intervals containing the peak
    select = (peak >= starts) & (peak <= ends)
    widths = widths[select]
    if len(widths) == 0:
        raise ValueError('Too few elements for interval calculation')
    min_idx = np.argmin(widths)
    lo = x[min_idx]
    hi = x[min_idx+window]
    return interval(peak,lo,hi)
raster.py (project: pyroSAR, author: johntruckenbrodt)
def rescale(self, function):
        """
        perform raster computations with custom functions and assign the result to the existing raster object in memory

        Args:
            function:

        Returns:

        """
        if self.bands != 1:
            raise ValueError('only single band images supported')

        # load array
        mat = self.matrix()

        # scale values
        scaled = function(mat)

        # round to nearest integer
        rounded = np.rint(scaled)

        # assign newly computed array to raster object
        self.assign(rounded)
preprocessing.py (project: deepjets, author: deepjets)
def zoom_image(image, zoom, out_width=25):
    """Return rescaled and cropped image array with width out_width.
    """
    if zoom < 1:
        raise ValueError("Zoom scale factor must be at least 1.")

    width, height = image.shape
    #if width < out_width:
    #    raise ValueError(
    #        "image width before zooming ({0}) is less "
    #        "than requested output width ({1})".format(width, out_width))
    out_height = int(np.rint(float(out_width * height) / width))
    t_width = int(np.rint(out_width * zoom))
    t_height = int(np.rint(out_height * zoom))
    if t_width // 2 != out_width // 2:
        t_width += 1
    if t_height // 2 != out_height // 2:
        t_height += 1
    # zoom with cubic interpolation
    t_image = transform.resize(image, (t_width, t_height), order=3)
    # crop
    return t_image[(t_width - out_width) // 2:(t_width + out_width) // 2,
                   (t_height - out_height) // 2:(t_height + out_height) // 2]
NeuralNet.py (project: hco-experiments, author: zooniverse)
def visualiseLearnedFeatures(self):
        """
            Visualise the features learned by the autoencoder
        """
        import matplotlib.pyplot as plt

        extent = int(np.sqrt(self._architecture[0])) # size of input vector is stored in self._architecture
        # number of rows and columns to plot (number of hidden units also stored in self._architecture)
        plotDims = int(np.rint(np.sqrt(self._architecture[1])))
        plt.ion()
        fig = plt.figure()
        plt.set_cmap("gnuplot")
        plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=-0.6, hspace=0.1)
        learnedFeatures = self.getLearnedFeatures()
        for i in range(self._architecture[1]):
            image = np.reshape(learnedFeatures[i,:], (extent, extent), order="F") * 1000
            ax = fig.add_subplot(plotDims, plotDims, i + 1)
            plt.axis("off")
            ax.imshow(image, interpolation="nearest")
        plt.show()
        raw_input("Program paused. Press enter to continue.")
NeuralNet.py (project: hco-experiments, author: zooniverse)
def visualiseLearnedFeatures(self):
        """
            Visualise the features learned by the autoencoder
        """
        import matplotlib.pyplot as plt

        extent = int(np.sqrt(self._architecture[0])) # size of input vector is stored in self._architecture
        # number of rows and columns to plot (number of hidden units also stored in self._architecture)
        plotDims = int(np.rint(np.sqrt(self._architecture[1])))
        plt.ion()
        fig = plt.figure()
        plt.set_cmap("gnuplot")
        plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=-0.6, hspace=0.1)
        learnedFeatures = self.getLearnedFeatures()
        for i in range(self._architecture[1]):
            image = np.reshape(learnedFeatures[i,:], (extent, extent), order="F") * 1000
            ax = fig.add_subplot(plotDims, plotDims, i + 1)
            plt.axis("off")
            ax.imshow(image, interpolation="nearest")
        plt.show()
        input("Program paused. Press enter to continue.")
convert_data.py (project: aapm_thoracic_challenge, author: xf4j)
def read_images(path):
    for subdir, dirs, files in os.walk(path):
        dcms = glob.glob(os.path.join(subdir, '*.dcm'))
        if len(dcms) > 1:
            slices = [dicom.read_file(dcm) for dcm in dcms]
            slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
            images = np.stack([s.pixel_array for s in slices], axis=0).astype(np.float32)
            images = images + slices[0].RescaleIntercept
    images = normalize(images)

    inplane_scale = slices[0].PixelSpacing[0] / PIXEL_SPACING
    inplane_size = int(np.rint(inplane_scale * slices[0].Rows / 2) * 2)
    z_scale = slices[0].SliceThickness / SLICE_THICKNESS
    z_size = int(np.rint(z_scale * images.shape[0]))

    if inplane_size != INPLANE_SIZE or z_scale != 1:
        images = resize(images, (z_size, inplane_size, inplane_size), mode='constant')
        if inplane_size != INPLANE_SIZE:
            if inplane_size > INPLANE_SIZE:
                crop = int((inplane_size - INPLANE_SIZE) / 2)
                images = images[:, crop : crop + INPLANE_SIZE, crop : crop + INPLANE_SIZE]
            else:
                pad = int((INPLANE_SIZE - inplane_size) / 2)
                images = np.pad(images, ((0, 0), (pad, pad), (pad, pad)), 'constant')

    return images
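
The expression int(np.rint(inplane_scale * slices[0].Rows / 2) * 2) rounds the resampled in-plane size to the nearest even integer, so it stays divisible by two. Stripped of the DICOM context (numbers are made up):

import numpy as np

inplane_scale, rows = 0.978515625, 512   # hypothetical PixelSpacing ratio and row count
print(int(np.rint(inplane_scale * rows / 2) * 2))   # 500: 501 pixels rounded to the nearest even size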
convert_data.py (project: aapm_thoracic_challenge, author: xf4j)
def read_images_labels(path):
    # Read the images and labels from a folder containing both the image and structure DICOM files
    for subdir, dirs, files in os.walk(path):
        dcms = glob.glob(os.path.join(subdir, '*.dcm'))
        if len(dcms) == 1:
            structure = dicom.read_file(dcms[0])
            contours = read_structure(structure)
        elif len(dcms) > 1:
            slices = [dicom.read_file(dcm) for dcm in dcms]
            slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
            images = np.stack([s.pixel_array for s in slices], axis=0).astype(np.float32)
            images = images + slices[0].RescaleIntercept

    images = normalize(images)
    labels = get_labels(contours, images.shape, slices)
    inplane_scale = slices[0].PixelSpacing[0] / PIXEL_SPACING
    inplane_size = int(np.rint(inplane_scale * slices[0].Rows / 2) * 2)
    z_scale = slices[0].SliceThickness / SLICE_THICKNESS
    z_size = int(np.rint(z_scale * images.shape[0]))

    if inplane_size != INPLANE_SIZE or z_scale != 1:
        images = resize(images, (z_size, inplane_size, inplane_size), mode='constant')
        new_labels = np.zeros_like(images, dtype=np.float32)
        for z in range(N_CLASSES):
            roi = resize((labels == z + 1).astype(np.float32), (z_size, inplane_size, inplane_size), mode='constant')
            new_labels[roi >= 0.5] = z + 1
        labels = new_labels
        if inplane_size != INPLANE_SIZE:
            if inplane_size > INPLANE_SIZE:
                crop = int((inplane_size - INPLANE_SIZE) / 2)
                images = images[:, crop : crop + INPLANE_SIZE, crop : crop + INPLANE_SIZE]
                labels = labels[:, crop : crop + INPLANE_SIZE, crop : crop + INPLANE_SIZE]
            else:
                pad = int((INPLANE_SIZE - inplane_size) / 2)
                images = np.pad(images, ((0, 0), (pad, pad), (pad, pad)), 'constant')
                labels = np.pad(labels, ((0, 0), (pad, pad), (pad, pad)), 'constant')

    return images, labels
utils.py (project: aapm_thoracic_challenge, author: xf4j)
def read_testing_inputs(file, roi, im_size, output_path=None):
    f_h5 = h5py.File(file, 'r')
    if roi == -1:
        images = np.asarray(f_h5['resized_images'], dtype=np.float32)
        read_info = {}
        read_info['shape'] = np.asarray(f_h5['images'], dtype=np.float32).shape
    else:
        images = np.asarray(f_h5['images'], dtype=np.float32)
        output = h5py.File(os.path.join(output_path, 'All_' + os.path.basename(file)), 'r')
        predictions = np.asarray(output['predictions'], dtype=np.float32)
        output.close()
        # Select the roi
        roi_labels = (predictions == roi + 1).astype(np.float32)
        nz = np.nonzero(roi_labels)
        extract = []
        for c in range(3):
            start = np.amin(nz[c])
            end = np.amax(nz[c])
            r = end - start
            extract.append((np.maximum(int(np.rint(start - r * 0.1)), 0),
                            np.minimum(int(np.rint(end + r * 0.1)), images.shape[c])))

        extract_images = images[extract[0][0] : extract[0][1], extract[1][0] : extract[1][1], extract[2][0] : extract[2][1]]
        read_info = {}
        read_info['shape'] = images.shape
        read_info['extract_shape'] = extract_images.shape
        read_info['extract'] = extract

        images = resize(extract_images, im_size, mode='constant')

    f_h5.close()
    return images, read_info

