Python size(): real-world usage examples from open-source projects

core.py (project: Neural-Network, author: yzldw333)
def unRollImagesForConv(self,index):
        '''Unroll the index-th image of the previous layer for convolution.
        Returns an array of shape (width*height, squareSize**2 * old_channel),
        one row per output position, so the convolution becomes a matrix product.
        '''
        i = index
        old_images = self.last_layer.images
        m,old_channel,old_height,old_width = old_images.shape
        newData = []
        # unroll the data, one window position at a time
        for h in range(0,old_height-self.squareSize+1,self.stride):
            for w in range(0,old_width-self.squareSize+1,self.stride):
                tmp = []
                for c in range(old_channel):
                    tmp.append(old_images[i,c,h:h+self.squareSize,w:w+self.squareSize].reshape(1,self.squareSize**2))
                    # at each (h, w) position, collect the squareSize x squareSize patch from every one of the old_channel channels
                tmp = np.array(tmp).reshape(1,self.squareSize**2*old_channel)
                # reshape the stacked patches into a single row vector
                newData.append(tmp)
                # each row of newData corresponds to one output window position
        newData = np.array(newData).reshape(self.width*self.height,self.squareSize**2*old_channel)
        return newData
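For reference, this unrolling is the classic im2col transformation: once every receptive field becomes a row, the convolution itself reduces to one matrix product with the flattened filters. Below is a minimal self-contained sketch of the same idea in plain NumPy (independent of the class above; all names are illustrative):

import numpy as np

def im2col(image, k, stride=1):
    """Unroll one (C, H, W) image into rows of k*k*C patch values."""
    c, h, w = image.shape
    rows = []
    for i in range(0, h - k + 1, stride):
        for j in range(0, w - k + 1, stride):
            # one row = the k x k window from every channel, flattened
            rows.append(image[:, i:i + k, j:j + k].reshape(-1))
    return np.array(rows)                     # (out_h*out_w, k*k*c)

image = np.arange(2 * 4 * 4, dtype=float).reshape(2, 4, 4)
filters = np.random.randn(3, 2 * 3 * 3)      # 3 filters of size 3x3 over 2 channels
cols = im2col(image, k=3)                    # (4, 18)
feature_maps = cols @ filters.T              # (4, 3): convolution as a matmul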
core.py (project: Neural-Network, author: yzldw333)
def backward_compute(self):
        m = np.size(self.images,0)
        self.delta = self.delta.reshape(m,self.channel,self.height,self.width)
        newDelta = np.zeros([m,self.last_layer.channel,self.last_layer.height,self.last_layer.width])
        for i in range(m):
            for j in range(self.channel):
                for h in range(self.height):
                    for w in range(self.width):
                        tmpLoc = self.maxIndex[i,j,h*self.width+w]
                        relativeH = tmpLoc//self.squareSize
                        relativeW = tmpLoc - relativeH * self.squareSize
                        lastW = w*self.stride+relativeW
                        lastH = h*self.stride+relativeH
                        newDelta[i,j,lastH,lastW] += self.delta[i,j,h,w]
        self.last_layer.delta = newDelta
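Note the inner-loop index arithmetic simply decodes a within-window offset that was flattened row-major; divmod expresses the same mapping directly. A quick self-check of that encoding (toy value of squareSize):

squareSize = 3
for tmpLoc in range(squareSize ** 2):
    # maxIndex stores argmax positions flattened as relativeH*squareSize + relativeW
    relativeH, relativeW = divmod(tmpLoc, squareSize)
    assert tmpLoc == relativeH * squareSize + relativeW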
functions.py (project: DEPICT, author: herandy)
def bestMap(L1, L2):
    if len(L1) != len(L2):
        raise ValueError('size(L1) must equal size(L2)')

    Label1 = np.unique(L1)
    nClass1 = len(Label1)
    Label2 = np.unique(L2)
    nClass2 = len(Label2)

    nClass = max(nClass1, nClass2)
    G = np.zeros((nClass, nClass))
    for i in range(nClass1):
        for j in range(nClass2):
            G[i, j] = len(np.nonzero((L1 == Label1[i]) * (L2 == Label2[j]))[0])

    # Hungarian assignment (sklearn's deprecated private linear_assignment_ module)
    c = linear_assignment_.linear_assignment(-G.T)[:, 1]
    newL2 = np.zeros(len(L2))
    for i in range(nClass2):
        for j in np.nonzero(L2 == Label2[i])[0]:
            if len(Label1) > c[i]:
                newL2[j] = Label1[c[i]]

    return accuracy_score(L1, newL2)
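Note that linear_assignment_ is scikit-learn's private Hungarian-algorithm module, which was deprecated and removed in scikit-learn 0.23; scipy.optimize.linear_sum_assignment solves the same problem. A sketch of the equivalent call on a toy overlap matrix (assuming the rest of bestMap is unchanged):

import numpy as np
from scipy.optimize import linear_sum_assignment

G = np.array([[5., 1.],
              [2., 7.]])                     # toy overlap matrix
# minimizing -G.T maximizes total overlap, exactly as linear_assignment(-G.T) did
row_ind, col_ind = linear_sum_assignment(-G.T)
c = col_ind                                  # c[i]: Label1 index matched to cluster i
print(c)                                     # [0 1]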
math_utils.py (project: SERT, author: cvangysel)
def entropy(pk, *args, **kwargs):
    """Proxy for scipy.stats.entropy, with normalized Shannon entropy."""
    normalize = kwargs.pop('normalize', False)

    e = scipy.stats.entropy(pk, *args, **kwargs)

    if normalize:
        num_classes = np.size(pk)
        base = kwargs.get('base')

        maximum_entropy = np.log(num_classes)
        if base:
            maximum_entropy /= np.log(base)

        e /= maximum_entropy

    return e
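A quick usage sketch, assuming the entropy wrapper above is in scope: with normalize=True a uniform distribution scores 1.0 regardless of the number of classes, which makes values comparable across distributions of different sizes.

import numpy as np
import scipy.stats

uniform = np.ones(8) / 8
peaked = np.array([0.9, 0.05, 0.03, 0.02])

print(scipy.stats.entropy(uniform))        # ln(8), about 2.079
print(entropy(uniform, normalize=True))    # 1.0
print(entropy(peaked, normalize=True))     # well below 1.0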
sudoku_steps.py (project: pyku, author: dubvulture)
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a Hough transform.
        :param grid: binary image
        :param image: source image (unused here)
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)
        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape(lines.size // 2, 2)  # integer division for Python 3
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta to [-pi, pi] and flip negative rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            if self.debug:
                self.save_hough(lines, clmap)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
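The normalisation step relies on the Hough identity that (rho, theta) and (-rho, theta - pi) describe the same line, since rho = x*cos(theta) + y*sin(theta) flips sign when theta shifts by pi. A toy illustration with hypothetical values:

import numpy as np

lines = np.array([[-10.0, 3.0],
                  [ 10.0, 0.5]])      # (rho, theta) pairs
lines[lines[:, 0] < 0, 1] -= np.pi    # theta -> theta - pi
lines[lines[:, 0] < 0, 0] *= -1       # rho   -> -rho
print(lines)                          # [[10.0, 3.0 - pi], [10.0, 0.5]]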
sudoku_steps.py (project: pyku, author: dubvulture)
def save_hough(self, lines, clmap):
        """
        :param lines: (rho, theta) pairs
        :param clmap: clusters assigned to lines
        :return: None
        """
        height, width = self.image.shape
        ratio = 600. * (self.step+1) / min(height, width)
        temp = cv2.resize(self.image, None, fx=ratio, fy=ratio,
                          interpolation=cv2.INTER_CUBIC)
        temp = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
        colors = [(0, 127, 255), (255, 0, 127)]

        for i in range(np.size(lines) // 2):  # integer division for Python 3
            rho = lines[i, 0]
            theta = lines[i, 1]
            color = colors[clmap[i, 0]]
            if theta < np.pi / 4 or theta > 3 * np.pi / 4:
                pt1 = (rho / np.cos(theta), 0)
                pt2 = (rho - height * np.sin(theta) / np.cos(theta), height)
            else:
                pt1 = (0, rho / np.sin(theta))
                pt2 = (width, (rho - width * np.cos(theta)) / np.sin(theta))
            pt1 = (int(pt1[0]), int(pt1[1]))
            pt2 = (int(pt2[0]), int(pt2[1]))
            cv2.line(temp, pt1, pt2, color, 5)

        self.save2image(temp)
utils.py (project: pyku, author: dubvulture)
def morph(roi):
    ratio = min(28. / np.size(roi, 0), 28. / np.size(roi, 1))
    roi = cv2.resize(roi, None, fx=ratio, fy=ratio,
                     interpolation=cv2.INTER_NEAREST)
    dx = 28 - np.size(roi, 1)
    dy = 28 - np.size(roi, 0)
    px = (int(dx / 2.), int(np.ceil(dx / 2.)))
    py = (int(dy / 2.), int(np.ceil(dy / 2.)))
    squared = np.pad(roi, (py, px), 'constant', constant_values=0)
    return squared
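A quick check of the behaviour, assuming cv2 and the morph above are available: a wide ROI is scaled so its longer side becomes 28 px, then zero-padded symmetrically to 28x28 (floor/ceil splitting any odd remainder).

import numpy as np

roi = np.full((14, 56), 255, dtype=np.uint8)   # wide toy digit
out = morph(roi)
print(out.shape)       # (28, 28)
print(out[:10, 0])     # top rows are zero padding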
sudoku.py (project: pyku, author: dubvulture)
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a Hough transform.
        :param grid: binary image
        :param image: source image (unused here)
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)

        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape(lines.size // 2, 2)  # integer division for Python 3
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta to [-pi, pi] and flip negative rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
facenet.py (project: facerecognition, author: guoxiaolu)
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1/std_adj)
    return y
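Usage sketch: after prewhitening, any input has zero mean and approximately unit standard deviation, which is what the downstream face-embedding network expects.

import numpy as np

img = np.random.randint(0, 256, (160, 160, 3)).astype(np.float32)
y = prewhiten(img)
print(np.mean(y), np.std(y))   # ~0.0, ~1.0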
facenet.py (project: facerecognition, author: guoxiaolu)
def get_label_batch(label_data, batch_size, batch_index):
    nrof_examples = np.size(label_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size<=nrof_examples:
        batch = label_data[j:j+batch_size]
    else:
        x1 = label_data[j:nrof_examples]
        # wrap around: take the remaining j+batch_size-nrof_examples items from the front
        x2 = label_data[0:j+batch_size-nrof_examples]
        batch = np.vstack([x1,x2])
    batch_int = batch.astype(np.int64)
    return batch_int
facenet.py (project: facerecognition, author: guoxiaolu)
def get_batch(image_data, batch_size, batch_index):
    nrof_examples = np.size(image_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size<=nrof_examples:
        batch = image_data[j:j+batch_size,:,:,:]
    else:
        x1 = image_data[j:nrof_examples,:,:,:]
        # wrap around: take the remaining j+batch_size-nrof_examples items from the front
        x2 = image_data[0:j+batch_size-nrof_examples,:,:,:]
        batch = np.vstack([x1,x2])
    batch_float = batch.astype(np.float32)
    return batch_float
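Both batch helpers implement the same wrap-around policy: a batch that would run past the end of the data is completed from the start of the array. A toy trace using the (corrected) get_batch above:

import numpy as np

data = np.arange(10, dtype=np.float32).reshape(10, 1, 1, 1)
# batch_index 2 with batch_size 4 starts at j = 8 and wraps around:
batch = get_batch(data, batch_size=4, batch_index=2)
print(batch.ravel())   # [8. 9. 0. 1.]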
facenet.py (project: facerecognition, author: guoxiaolu)
def calculate_accuracy(threshold, dist, actual_issame):
    predict_issame = np.less(dist, threshold)
    tp = np.sum(np.logical_and(predict_issame, actual_issame))
    fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
    fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))

    tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
    fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
    acc = float(tp+tn)/dist.size
    return tpr, fpr, acc
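A small verification example, assuming calculate_accuracy above is in scope: at threshold 0.5, distances below the threshold are predicted as the same identity.

import numpy as np

dist = np.array([0.3, 0.9, 0.4, 1.2])
actual_issame = np.array([True, False, True, False])
tpr, fpr, acc = calculate_accuracy(0.5, dist, actual_issame)
print(tpr, fpr, acc)   # 1.0 0.0 1.0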
test_fields.py (project: pyfds, author: emtpb)
def test_field2d_init():
    # create a field where the main material is 5
    fld = fls.Field2D(100, 0.1, 100, 0.1, 100, 0.1, int(5))
    # check if the "material parameter" 'real' for the complete field is 5
    assert np.allclose(fld.material_vector('real'), 5)
    assert np.size(fld.material_vector('real')) == 10000
average_precision_calculator.py (project: youtube-8m, author: wangheda)
def heap_size(self):
    """Gets the heap size maintained in the class."""
    return len(self._heap)
average_precision_calculator.py (project: youtube-8m, author: wangheda)
def accumulate(self, predictions, actuals, num_positives=None):
    """Accumulate the predictions and their ground truth labels.

    After the function call, we may call peek_ap_at_n to actually calculate
    the average precision.
    Note predictions and actuals must have the same shape.

    Args:
      predictions: a list storing the prediction scores.
      actuals: a list storing the ground truth labels. Any value
      larger than 0 will be treated as positive, otherwise as negative.
      num_positives: If the 'predictions' and 'actuals' inputs aren't complete,
      then it's possible some true positives were missed in them. In that case,
      you can provide 'num_positives' in order to accurately track recall.

    Raises:
      ValueError: An error occurred when the format of the input is not the
      numpy 1-D array or the shape of predictions and actuals does not match.
    """
    if len(predictions) != len(actuals):
      raise ValueError("the shape of predictions and actuals does not match.")

    if num_positives is not None:
      if not isinstance(num_positives, numbers.Number) or num_positives < 0:
        raise ValueError(
            "'num_positives' was provided but it wasn't a non-negative number.")
      self._total_positives += num_positives
    else:
      self._total_positives += numpy.size(numpy.where(actuals > 0))
    topk = self._top_n
    heap = self._heap

    for i in range(numpy.size(predictions)):
      if topk is None or len(heap) < topk:
        heapq.heappush(heap, (predictions[i], actuals[i]))
      else:
        if predictions[i] > heap[0][0]:  # heap[0] is the smallest
          heapq.heappop(heap)
          heapq.heappush(heap, (predictions[i], actuals[i]))
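The heap bookkeeping above is the standard top-n pattern: a min-heap of size n whose root is the weakest retained prediction, so each new score needs only one comparison against heap[0]. A stand-alone sketch:

import heapq

top_n = 3
heap = []
for score, label in [(0.9, 1), (0.1, 0), (0.5, 1), (0.7, 0), (0.2, 1)]:
    if len(heap) < top_n:
        heapq.heappush(heap, (score, label))
    elif score > heap[0][0]:              # heap[0] is the smallest kept score
        heapq.heapreplace(heap, (score, label))
print(sorted(heap, reverse=True))         # [(0.9, 1), (0.7, 0), (0.5, 1)]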
mklmm.py (project: MKLMM, author: omerwe)
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
        L = post['L']
        if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
        Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
        ns = diagKTestTest.shape[0]
        nperbatch = 5000
        nact = 0

        #allocate mem
        fmu = np.zeros(ns)  #column vector (of length ns) of predictive latent means
        fs2 = np.zeros(ns)  #column vector (of length ns) of predictive latent variances
        while (nact<(ns-1)):
            idx = np.arange(nact, np.minimum(nact+nperbatch, ns))  #batch of test-point indices
            kss = diagKTestTest[idx]
            Ks = KtrainTest[:, idx]
            if (len(post['alpha'].shape) == 1):
                try: Fmu = intercept[idx] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])  #intercept may be a scalar
                fmu[idx] = Fmu
            else:
                try: Fmu = intercept[idx][:, np.newaxis] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])  #intercept may be a scalar
                fmu[idx] = Fmu.mean(axis=1)
            if Lchol:
                V = la.solve_triangular(L, Ks*np.tile(post['sW'], (idx.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
                fs2[idx] = kss - np.sum(V**2, axis=0)                      #predictive variances
            else:
                fs2[idx] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)          #predictive variances
            fs2[idx] = np.maximum(fs2[idx], 0)  #remove numerical noise, i.e. negative variances
            nact = idx[-1]    #set counter to index of last processed data point

        return fmu, fs2
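For context, the Lchol branch follows the textbook GP-regression identity: with L a Cholesky factor of the (noise-augmented) training kernel, V = L^{-1} K*, and the predictive variance is k** minus the column sums of V squared. A minimal stand-alone sketch of that identity on a toy RBF kernel (not a line-for-line reproduction of the scaled, transposed call above; all names illustrative):

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(0)
Xtr = rng.normal(size=(20, 1))
Xte = rng.normal(size=(5, 1))
rbf = lambda a, b: np.exp(-0.5 * (a - b.T) ** 2)

K = rbf(Xtr, Xtr) + 0.1 * np.eye(20)    # training kernel + noise
Ks = rbf(Xtr, Xte)                      # train/test cross-kernel
kss = np.ones(5)                        # rbf(x, x) == 1

L = np.linalg.cholesky(K)               # lower-triangular factor
V = la.solve_triangular(L, Ks, lower=True)
fs2 = kss - np.sum(V ** 2, axis=0)      # predictive latent variances
fs2 = np.maximum(fs2, 0)                # clip numerical noise
print(fs2)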
image_helper.py (project: detection-2016-nipsws, author: imatge-upc)
def get_all_images(image_names, path_voc):
    images = []
    for j in range(np.size(image_names)):
        image_name = image_names[0][j]
        string = path_voc + '/JPEGImages/' + image_name + '.jpg'
        images.append(image.load_img(string, False))
    return images

