Python max() examples from real-world source code

nn1_stress_test.py (project: YellowFin_Pytorch, author: JianGoForIt)
def pad_batch(mini_batch):
    """Zero-pad each side of a batch of sentence pairs to that side's max length."""
    mini_batch_size = len(mini_batch)
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            try:
                main_matrix1[idx1, idx2] = j  # was main_matrix1[i, j]: the loop indices were intended
            except IndexError:
                pass
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            try:
                main_matrix2[idx1, idx2] = j
            except IndexError:
                pass
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
    return [main_matrix1_t, main_matrix2_t]

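
A quick usage sketch with a made-up toy batch: each element is a pair of token-index sequences, and each side is zero-padded to that side's batch maximum.

batch = [([3, 7, 1], [2, 9]), ([5], [4, 4, 4, 8])]
m1, m2 = pad_batch(batch)
# m1.size() == (2, 3) and m2.size() == (2, 4); short rows are zero-padded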
average_precision_calculator.py (project: youtube-8m, author: wangheda)
def _zero_one_normalize(predictions, epsilon=1e-7):
    """Normalize the predictions to the range between 0.0 and 1.0.

    For some predictions like SVM predictions, we need to normalize them before
    calculating the interpolated average precision. The normalization will not
    change the rank in the original list and thus won't change the average
    precision.

    Args:
      predictions: a numpy 1-D array storing the sparse prediction scores.
      epsilon: a small constant to avoid denominator being zero.

    Returns:
      The normalized prediction.
    """
    denominator = numpy.max(predictions) - numpy.min(predictions)
    # numpy.maximum (elementwise), not numpy.max: numpy.max(denominator, epsilon)
    # would treat epsilon as an axis argument
    ret = (predictions - numpy.min(predictions)) / numpy.maximum(denominator,
                                                                 epsilon)
    return ret
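
A quick sanity check with made-up scores: the normalization maps into [0, 1] and preserves the ranking.

import numpy
scores = numpy.array([-2.5, 0.0, 1.5, 4.0])
_zero_one_normalize(scores)
# -> array([0.        , 0.38461538, 0.61538462, 1.        ])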
dqn_trainer.py (project: chainer_pong, author: icoxfog417)
def calc_loss(self, states, actions, rewards, next_states, episode_ends):
        qv = self.agent.q(states)
        q_t = self.target(next_states)  # Q(s', *)
        max_q_prime = np.array(list(map(np.max, q_t.data)), dtype=np.float32)  # max_a Q(s', a)

        target = cuda.to_cpu(qv.data.copy())
        for i in range(self.replay_size):
            if episode_ends[i][0]:
                _r = np.sign(rewards[i])
            else:
                _r = np.sign(rewards[i]) + self.gamma * max_q_prime[i]

            target[i, actions[i]] = _r

        td = Variable(self.target.arr_to_gpu(target)) - qv
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # keep the denominator below non-zero
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)  # clip |td| to 1, preserving sign

        zeros = Variable(self.target.arr_to_gpu(np.zeros((self.replay_size, self.target.n_action), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zeros)
        self._loss = loss.data
        self._qv = np.max(qv.data)
        return loss
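
The arithmetic above implements error clipping: TD errors keep their sign but are limited to [-1, 1], as in the DQN paper. A minimal numpy illustration of the same elementwise rule, with made-up values:

import numpy as np
td = np.array([-3.0, -0.5, 0.2, 2.0], dtype=np.float32)
td_clip = np.where(np.abs(td) <= 1, td, np.sign(td))
# -> [-1. , -0.5,  0.2,  1. ], matching td * (|td|<=1) + td/|td| * (|td|>1)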
align_dlib.py (project: facerecognition, author: guoxiaolu)
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
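
The selection itself is just the built-in max with an area key; the same idea with plain dlib rectangles (hypothetical boxes):

import dlib
boxes = [dlib.rectangle(0, 0, 50, 50), dlib.rectangle(10, 10, 110, 90)]
largest = max(boxes, key=lambda rect: rect.width() * rect.height())
# largest is the second box (the one with the greater area)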
nn1_stress_test.py (project: YellowFin_Pytorch, author: JianGoForIt)
def test_accuracy_full_batch(tokens, features, mini_batch_size, word_attn, sent_attn, th=0.5):
    p = []
    cnt = 0
    g = gen_minibatch1(tokens, features, mini_batch_size, False)
    for token, feature in g:
        if cnt % 100 == 0:
            print(cnt)
        cnt += 1
        y_pred = get_predictions(token, feature, word_attn, sent_attn)
        p.append(np.ndarray.flatten(y_pred.data.cpu().numpy()))
    p = [item for sublist in p for item in sublist]
    p = np.array(p)
    return p
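
The flatten-then-stack step at the end is a common idiom; in isolation, with toy data:

p = [[0.1, 0.9], [0.3], [0.5, 0.2]]
flat = [item for sublist in p for item in sublist]
# -> [0.1, 0.9, 0.3, 0.5, 0.2], then np.array(flat) stacks it into one vector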
rasterfairy.py (project: RasterFairy, author: Quasimondo)
def getRectArrangements(n):
    p = prime.Prime()
    f = p.getPrimeFactors(n)
    f_count = len(f)
    ma = multiplyArray(f)
    arrangements = set([(1,ma)])

    if (f_count > 1):
        perms = set(p.getPermutations(f))
        for perm in perms:
            for i in range(1,f_count):
                v1 = multiplyArray(perm[0:i])
                v2 = multiplyArray(perm[i:])
                arrangements.add((min(v1, v2),max(v1, v2)))

    # Python 2's `cmp=` keyword is gone in Python 3; wrap the comparator instead
    # (requires `import functools` at module level)
    return sorted(arrangements, key=functools.cmp_to_key(proportion_sort), reverse=True)
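
For small n the same factor-pair enumeration can be written without the Prime helper; a self-contained sketch (not the project's code):

def rect_arrangements_simple(n):
    pairs = {(d, n // d) for d in range(1, int(n ** 0.5) + 1) if n % d == 0}
    return sorted((min(a, b), max(a, b)) for a, b in pairs)

rect_arrangements_simple(12)  # -> [(1, 12), (2, 6), (3, 4)]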
gui.py (project: spyking-circus, author: spyking-circus)
def update_sort_idcs(self):
        # The selected points are sorted before all the other points -- an easy
        # way to achieve this is to add the maximum score to their score
        if self.current_order == 0:
            score = self.score_x
        elif self.current_order == 1:
            score = self.score_y
        elif self.current_order == 2:
            score = self.score_z
        else:
            raise AssertionError(self.current_order)
        score = score.copy()
        if len(self.selected_points):
            score[np.array(sorted(self.selected_points))] += score.max()

        self.sort_idcs = np.argsort(score)
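
A minimal demonstration of the add-the-max trick with made-up scores: the selected indices end up last in the argsort order.

import numpy as np
score = np.array([0.2, 0.9, 0.4, 0.1])
score[np.array([0, 2])] += score.max()   # select points 0 and 2
np.argsort(score)                        # -> array([3, 1, 0, 2])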
gui.py (project: spyking-circus, author: spyking-circus)
def update_data_sort_order(self, new_sort_order=None):
        if new_sort_order is not None:
            self.current_order = new_sort_order
        self.update_sort_idcs()
        self.data_image.set_extent((self.raw_lags[0], self.raw_lags[-1],
                            0, len(self.sort_idcs)))
        self.data_ax.set_ylim(0, len(self.sort_idcs))
        all_raw_data  = self.raw_data
        all_raw_data /= (1 + self.raw_data.mean(1)[:, np.newaxis])
        if len(all_raw_data) > 0:
            cmax          = 0.5*all_raw_data.max()
            cmin          = 0.5*all_raw_data.min()
            all_raw_data  = all_raw_data[self.sort_idcs, :]
        else:
            cmin = 0
            cmax = 1
        self.data_image.set_data(all_raw_data)
        self.data_image.set_clim(cmin, cmax)
        self.data_selection.set_y(len(self.sort_idcs)-len(self.selected_points))
        self.data_selection.set_height(len(self.selected_points))
        self.update_data_plot()
average_precision_calculator.py (project: youtube-8m, author: wangheda)
def __init__(self, top_n=None):
    """Construct an AveragePrecisionCalculator to calculate average precision.

    This class is used to calculate the average precision for a single label.

    Args:
      top_n: A positive integer specifying the average precision at n, or
        None to use all provided data points.

    Raises:
      ValueError: An error occurred when the top_n is not a positive integer.
    """
    if not ((isinstance(top_n, int) and top_n > 0) or top_n is None):
      raise ValueError("top_n must be a positive integer or None.")

    self._top_n = top_n  # average precision at n
    self._total_positives = 0  # total number of positives seen so far
    self._heap = []  # max heap of (prediction, actual)
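
The `_heap` field relies on Python's heapq, which is a min-heap: keeping only the top_n highest-scoring pairs means pushing new items and popping the smallest. A usage sketch with made-up data (not the class's own accumulate method):

import heapq
heap, top_n = [], 3
for pred, label in [(0.9, 1), (0.2, 0), (0.7, 1), (0.4, 0)]:
    if len(heap) < top_n:
        heapq.heappush(heap, (pred, label))
    else:
        heapq.heappushpop(heap, (pred, label))
# heap now holds the 3 highest-scoring (prediction, label) pairs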
kernels.py (project: MKLMM, author: omerwe)
def getTrainKernel(self, params):
        self.checkParams(params)
        if (self.sameParams(params)): return self.cache['getTrainKernel']

        ell = np.exp(params[0])
        if (self.K_sq is None): K = sq_dist(self.X_scaled.T / ell)  #precompute squared distances
        else: K = self.K_sq / ell**2        
        self.cache['K_sq_scaled'] = K


        K_exp = np.exp(-K / 2.0)
        self.cache['getTrainKernel'] = K_exp
        self.saveParams(params)
        return K_exp
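
For reference, a standalone squared-exponential kernel equivalent to the cached computation above, K(x_i, x_j) = exp(-||x_i - x_j||^2 / (2 * ell^2)); a minimal sketch without the caching:

import numpy as np

def rbf_kernel(X, ell):
    sq = np.sum(X**2, axis=1)
    K_sq = sq[:, None] + sq[None, :] - 2 * X.dot(X.T)  # pairwise squared distances
    return np.exp(-K_sq / (2 * ell**2))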
kernels.py (project: MKLMM, author: omerwe)
def Kdim(self, kdimParams):
        if (self.prevKdimParams is not None and np.max(np.abs(kdimParams-self.prevKdimParams)) < self.epsilon): return self.cache['Kdim']

        K = np.zeros((self.n, self.n, len(self.kernels)))
        params_ind = 0
        for k_i, k in enumerate(self.kernels):
            numHyp = k.getNumParams()
            kernelParams_range = np.arange(params_ind, params_ind + numHyp)  # xrange is Python 2 only
            kernel_params = kdimParams[kernelParams_range]          
            if ((numHyp == 0 and 'Kdim' in self.cache) or (numHyp>0 and self.prevKdimParams is not None and np.max(np.abs(kernel_params-self.prevKdimParams[kernelParams_range])) < self.epsilon)):
                K[:,:,k_i] = self.cache['Kdim'][:,:,k_i]
            else:
                K[:,:,k_i] = k.getTrainKernel(kernel_params)                
            params_ind += numHyp
        self.prevKdimParams = kdimParams.copy()
        self.cache['Kdim'] = K
        return K
gpUtils.py (project: MKLMM, author: omerwe)
def removeTopPCs(X, numRemovePCs):  
    t0 = time.time()
    X_mean = X.mean(axis=0)
    X -= X_mean
    XXT = symmetrize(blas.dsyrk(1.0, X, lower=0))
    s,U = la.eigh(XXT)
    if (np.min(s) < -1e-4): raise Exception('Negative eigenvalues found')
    s[s<0]=0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    s = np.sqrt(s)

    #remove null PCs
    ind = (s>1e-6)
    U = U[:, ind]
    s = s[ind]

    V = X.T.dot(U/s)
    X = (U[:, numRemovePCs:]*s[numRemovePCs:]).dot((V.T)[numRemovePCs:, :])
    X += X_mean

    return X
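
The same top-PC removal can be written more compactly with an SVD of the centered matrix, at the cost of the dsyrk optimization above (a sketch, not the project's code):

import numpy as np

def remove_top_pcs_svd(X, num_remove):
    X_mean = X.mean(axis=0)
    U, s, Vt = np.linalg.svd(X - X_mean, full_matrices=False)
    return (U[:, num_remove:] * s[num_remove:]).dot(Vt[num_remove:, :]) + X_mean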
lungs_var3_d8g_222f.py (project: kaggle_dsb2017, author: astoc)
def resample(image, scan, new_spacing=[1,1,1]):
    # Determine current pixel spacing
    spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
    spacing = np.array(list(spacing))

    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor

    # mode='nearest' keeps border behavior sane; neither mode='wrap' nor cval=-1024
    # guarantees the min/max values are preserved. order=1 (bilinear) would preserve
    # the image's min and max and is faster than spline interpolation.
    image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest')

    return image, new_spacing
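
The shape/spacing arithmetic in isolation, with hypothetical 2.5 mm slices and 0.7 mm pixels:

import numpy as np
spacing = np.array([2.5, 0.7, 0.7])
shape = np.array([120, 512, 512])
new_shape = np.round(shape * spacing / 1.0)   # -> [300., 358., 358.]
real_resize_factor = new_shape / shape
new_spacing = spacing / real_resize_factor    # close to [1, 1, 1], not exactly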
checkPDFeaturesStrRed.py (project: Homology_BG, author: jyotikab)
def mypsd(Rates, time_range, bin_w=5., nmax=4000):
    bins = np.arange(0, len(time_range), 1)
    a, b = np.histogram(Rates, bins)
    ff = (1. / len(bins)) * abs(np.fft.fft(Rates - np.mean(Rates)))**2
    Fs = 1. / (1 * 0.001)
    freq2 = np.fft.fftfreq(len(bins))[0:len(bins) // 2 + 1]  # d=dt; note len(bins)//2, not len(bins/2)
    freq = np.fft.fftfreq(len(bins))[:len(ff) // 2 + 1]
    px = ff[0:len(ff) // 2 + 1]
    max_px = np.max(px[1:])
    idx = px == max_px
    corr_freq = freq[np.where(idx)[0]]  # pl.find is deprecated; np.where replaces it
    new_px = px
    max_pow = new_px[np.where(idx)[0]]
    return new_px, freq, corr_freq[0], freq2, max_pow
checkPDFeaturesStrRed.py (project: Homology_BG, author: jyotikab)
def spec_entropy(Rates, time_range=[], bin_w=5., freq_range=[]):
    '''Function to calculate the spectral entropy'''
    power, freq, dfreq, dummy1, dummy2 = mypsd(Rates, time_range, bin_w=bin_w)
    if freq_range != []:
        power = power[(freq >= freq_range[0]) & (freq <= freq_range[1])]
        freq = freq[(freq >= freq_range[0]) & (freq <= freq_range[1])]
    maxFreq = freq[np.where(power == np.max(power))] * 1000 * 100
    perMax = (np.max(power) / np.sum(power)) * 100
    k = len(freq)
    power = power / sum(power)
    sum_power = 0
    for ii in range(k):
        sum_power += power[ii] * np.log(power[ii])
    spec_ent = -(sum_power / np.log(k))
    return spec_ent, dfreq, maxFreq, perMax
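
A one-line sanity check of the entropy formula: a flat spectrum has maximal (unit) spectral entropy.

import numpy as np
p = np.array([0.25, 0.25, 0.25, 0.25])
-(p * np.log(p)).sum() / np.log(len(p))   # -> 1.0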
tip_tilt_2_axes_test.py (project: pi_gcs, author: lbusoni)
def testStartStopModulation(self):
        radiusInMilliRad= 12.4
        frequencyInHz= 100.
        centerInMilliRad= [-10, 15]
        self._tt.setTargetPosition(centerInMilliRad)
        self._tt.startModulation(radiusInMilliRad,
                                 frequencyInHz,
                                 centerInMilliRad)
        self.assertTrue(
            np.allclose(
                [1, 1, 0],
                self._ctrl.getWaveGeneratorStartStopMode()))
        waveform= self._ctrl.getWaveform(1)
        wants= self._tt._milliRadToGcsUnitsOneAxis(-10, self._tt.AXIS_A)
        got= np.mean(waveform)
        self.assertAlmostEqual(
            wants, got, msg="wants %g, got %g" % (wants, got))
        wants= self._tt._milliRadToGcsUnitsOneAxis(-10 + 12.4, self._tt.AXIS_A)
        got= np.max(waveform)
        self.assertAlmostEqual(
            wants, got, msg="wants %g, got %g" % (wants, got))

        self._tt.stopModulation()
        self.assertTrue(
            np.allclose(centerInMilliRad, self._tt.getTargetPosition()))
pbo_util.py (project: scikit-dataaccess, author: MITHaystack)
def getLatLonRange(pbo_info, station_list):
    '''
    Retrieve the range of latitudes and longitudes occupied by a set of stations

    @param pbo_info: PBO metadata
    @param station_list: List of stations

    @return list containing two tuples, lat_range and lon_range
    '''

    coord_list = getStationCoords(pbo_info, station_list)

    lat_list = []
    lon_list = []
    for coord in coord_list:
        lat_list.append(coord[0])
        lon_list.append(coord[1])

    lat_range = (np.min(lat_list), np.max(lat_list))
    lon_range = (np.min(lon_list), np.max(lon_list))

    return [lat_range, lon_range]
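
The same range computation on a numpy array of (lat, lon) rows, with hypothetical coordinates:

import numpy as np
coords = np.array([[34.2, -118.1], [36.5, -121.9], [35.0, -120.3]])
lat_range = (coords[:, 0].min(), coords[:, 0].max())   # (34.2, 36.5)
lon_range = (coords[:, 1].min(), coords[:, 1].max())   # (-121.9, -118.1)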
voxelchain_visualize.py (project: voxcelchain, author: hiroaki-kaneda)
def conv1(model):
    n1, n2, x, y, z = model.conv1.W.shape
    fig = plt.figure()
    for nn in range(0, n1):
        ax = fig.add_subplot(4, 5, nn+1, projection='3d')
        ax.set_xlim(0.0, x)
        ax.set_ylim(0.0, y)
        ax.set_zlim(0.0, z)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        w_max = np.max(model.conv1.W.data[nn, :])  # per-filter range, hoisted out of the voxel loop
        w_min = np.min(model.conv1.W.data[nn, :])  # (also avoids shadowing built-in max/min)
        for xx in range(0, x):
            for yy in range(0, y):
                for zz in range(0, z):
                    C = (model.conv1.W.data[nn, 0, xx, yy, zz] - w_min) / (w_max - w_min)
                    color = cm.cool(C)
                    C = abs(1.0 - C)
                    ax.plot(np.array([xx]), np.array([yy]), np.array([zz]), "o", color=color, ms=7.0*C, mew=0.1)

    plt.savefig("result/graph_conv1.png")
voxelchain_visualize.py (project: voxcelchain, author: hiroaki-kaneda)
def create_graph():
    logfile = 'result/log'
    xs = []
    ys = []
    ls = []
    with open(logfile, 'r') as f:
        data = json.load(f)

    for d in data:
        xs.append(d["iteration"])
        ys.append(d["main/accuracy"])
        ls.append(d["main/loss"])

    plt.clf()
    plt.cla()
    plt.hlines(1, 0, np.max(xs), colors='r', linestyles="dashed")  # dashed reference line at y=1
    plt.title(r"loss/accuracy")
    plt.plot(xs, ys, label="accuracy")
    plt.plot(xs, ls, label="loss")
    plt.legend()
    plt.savefig("result/log.png")
Visualizer.py (project: rank-ordered-autoencoder, author: paulbertens)
def reshapeWeights(self, weights, normalize=True, modifier=None):
        # reshape the weights matrix to a grid for visualization
        n_rows = int(np.sqrt(weights.shape[1]))
        n_cols = int(np.sqrt(weights.shape[1]))
        kernel_size = int(np.sqrt(weights.shape[0] / 3))
        weights_grid = np.zeros((int((np.sqrt(weights.shape[0] / 3) + 1) * n_rows),
                                 int((np.sqrt(weights.shape[0] / 3) + 1) * n_cols), 3), dtype=np.float32)
        cells_per_row = weights_grid.shape[0] // (kernel_size + 1)  # integer division: Python 3 range() needs ints
        cells_per_col = weights_grid.shape[1] // (kernel_size + 1)
        for i in range(cells_per_row):
            for j in range(cells_per_col):
                index = i * cells_per_row + j
                if not np.isclose(np.sum(weights[:, index]), 0):
                    cell = weights[:, index].reshape(kernel_size, kernel_size, 3)
                    if normalize:
                        cell = (cell - np.min(weights[:, index])) / \
                               ((np.max(weights[:, index]) - np.min(weights[:, index])) + 1.e-6)
                    if modifier is not None:
                        cell = cell * modifier[index]
                    weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                                 j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] = cell
        return weights_grid
features.py (project: detection-2016-nipsws, author: imatge-upc)
def extract_features_from_roi(roi):
    roi_width = roi.shape[1]
    roi_height = roi.shape[2]
    new_width = roi_width // feature_size    # integer bin sizes (Python 3 division)
    new_height = roi_height // feature_size
    pooled_values = np.zeros([feature_size, feature_size, 512])
    for j in range(512):
        for i in range(feature_size):
            for k in range(feature_size):
                # `&` binds tighter than `==`, so the original `a == b & c == d`
                # did not test both conditions; `and` is what was meant
                if k == (feature_size-1) and i == (feature_size-1):
                    patch = roi[j, i * new_width:roi_width, k * new_height:roi_height]
                elif k == (feature_size-1):
                    patch = roi[j, i * new_width:(i + 1) * new_width, k * new_height:roi_height]
                elif i == (feature_size-1):
                    patch = roi[j, i * new_width:roi_width, k * new_height:(k + 1) * new_height]
                else:
                    patch = roi[j, i * new_width:(i + 1) * new_width, k * new_height:(k + 1) * new_height]
                pooled_values[i, k, j] = np.max(patch)
    return pooled_values
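
When the ROI dimensions divide evenly by feature_size, the whole triple loop collapses into one reshape plus max; a numpy sketch with assumed shapes:

import numpy as np
roi = np.random.rand(512, 14, 14)
fs = 7
pooled = roi.reshape(512, fs, 14 // fs, fs, 14 // fs).max(axis=(2, 4))
pooled = pooled.transpose(1, 2, 0)   # (7, 7, 512), as in the function above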
indoor3d_util.py (project: pointnet, author: charlesq34)
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                random_sample, sample_num, sample_aug):
    """ room2block, with input filename and RGB preprocessing.
        for each block centralize XYZ, add normalized XYZ as 678 channels
    """
    data = data_label[:,0:6]
    data[:,3:6] /= 255.0
    label = data_label[:,-1].astype(np.uint8)
    max_room_x = max(data[:,0])
    max_room_y = max(data[:,1])
    max_room_z = max(data[:,2])

    data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,
                                          random_sample, sample_num, sample_aug)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z
        minx = min(data_batch[b, :, 0])
        miny = min(data_batch[b, :, 1])
        data_batch[b, :, 0] -= (minx+block_size/2)
        data_batch[b, :, 1] -= (miny+block_size/2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
indoor3d_util.py (project: pointnet, author: charlesq34)
def room2samples_plus_normalized(data_label, num_point):
    """ room2sample, with input filename and RGB preprocessing.
        for each block centralize XYZ, add normalized XYZ as 678 channels
    """
    data = data_label[:,0:6]
    data[:,3:6] /= 255.0
    label = data_label[:,-1].astype(np.uint8)
    max_room_x = max(data[:,0])
    max_room_y = max(data[:,1])
    max_room_z = max(data[:,2])

    data_batch, label_batch = room2samples(data, label, num_point)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
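
The normalization shared by both helpers, in isolation: divide each XYZ column by the room's maximum so channels 6-8 land in [0, 1] (hypothetical point cloud):

import numpy as np
xyz = np.random.rand(1024, 3) * [10.0, 8.0, 3.0]   # fake room extents
normalized = xyz / xyz.max(axis=0)                 # per-axis scaling into [0, 1]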
recognition_utils.py (project: pybot, author: spillai)
def mine(self, im, gt_bboxes): 
        """
        Propose bounding boxes using proposer, and
        augment non-overlapping boxes with IoU < 0.1
        to the ground truth set.
        (up to a maximum of num_proposals)
        """
        bboxes = self.proposer_.process(im)

        if len(gt_bboxes): 
            # Determine bboxes that have low IoU with ground truth
            # iou = [N x GT]
            iou = brute_force_match(bboxes, gt_bboxes, 
                                    match_func=lambda x,y: intersection_over_union(x,y))
            # print('Detected {}, {}, {}'.format(iou.shape, len(gt_bboxes), len(bboxes))) # , np.max(iou, axis=1)
            overlap_inds, = np.where(np.max(iou, axis=1) < 0.1)
            bboxes = bboxes[overlap_inds]
            # print('Remaining non-overlapping {}'.format(len(bboxes)))

        bboxes = bboxes[:self.num_proposals_]
        targets = self.generate_targets(len(bboxes))
        return bboxes, targets
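
A minimal intersection-over-union for axis-aligned (x1, y1, x2, y2) boxes, the measure the matching above assumes (a sketch, not the project's intersection_over_union):

def iou(a, b):
    ix = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    union = (a[2]-a[0])*(a[3]-a[1]) + (b[2]-b[0])*(b[3]-b[1]) - inter
    return inter / float(union)

iou((0, 0, 10, 10), (5, 5, 15, 15))   # -> 25/175 = 0.142...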
basic_model.py (project: sea-lion-counter, author: rdinse)
def inc_region(self, dst, y, x, h, w):
    '''Increments dst in the specified region. Runs fastest on np.int8, but not much
    slower on np.int16.'''

    dh, dw = dst.shape
    h2 = h // 2
    w2 = w // 2
    py = y - h2 
    px = x - w2 
    y_min = max(0, py)
    y_max = min(dh, y + h2)
    x_min = max(0, px)
    x_max = min(dw, x + w2)
    if y_max - y_min <= 0 or x_max - x_min <= 0:
      return

    dst[y_min:y_max, x_min:x_max] += 1
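
A quick check of the clipping behavior, inlining the same logic as a standalone call (hypothetical 4x4 canvas):

import numpy as np
dst = np.zeros((4, 4), dtype=np.int16)
y, x, h, w = 0, 0, 4, 4                  # 4x4 region centered on the corner
y_min, y_max = max(0, y - h // 2), min(dst.shape[0], y + h // 2)
x_min, x_max = max(0, x - w // 2), min(dst.shape[1], x + w // 2)
dst[y_min:y_max, x_min:x_max] += 1       # only the in-bounds 2x2 corner is incremented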

