Python maximum() example source code

loss.py (project: Dense-Net, author: achyudhk)
def svm_loss(x, y):
    """
    Computes the loss and gradient for multiclass SVM classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx
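A minimal usage sketch for svm_loss, assuming numpy is imported as np; the scores and labels below are made-up values.

import numpy as np

# Hypothetical scores for 3 inputs over 4 classes, plus their true labels.
scores = np.array([[3.2, 5.1, -1.7, 2.0],
                   [1.3, 4.9, 2.0, 0.5],
                   [2.2, 2.5, -3.1, 1.0]])
labels = np.array([0, 1, 2])

loss, dx = svm_loss(scores, labels)
print(loss)      # scalar hinge loss averaged over the 3 inputs
print(dx.shape)  # (3, 4), same shape as the scores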
utils.py (project: chainer-object-detection, author: dsanno)
def random_hsv_image(bgr_image, delta_hue, delta_sat_scale, delta_val_scale):
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV).astype(np.float32)

    # hue
    hsv_image[:, :, 0] += int((np.random.rand() * delta_hue * 2 - delta_hue) * 255)

    # sat
    sat_scale = 1 + np.random.rand() * delta_sat_scale * 2 - delta_sat_scale
    hsv_image[:, :, 1] *= sat_scale

    # val
    val_scale = 1 + np.random.rand() * delta_val_scale * 2 - delta_val_scale
    hsv_image[:, :, 2] *= val_scale

    hsv_image[hsv_image < 0] = 0
    hsv_image[hsv_image > 255] = 255
    hsv_image = hsv_image.astype(np.uint8)
    bgr_image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
    return bgr_image
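A small usage sketch for random_hsv_image, assuming cv2 and numpy are available; the jitter amounts are illustrative, not values taken from the original project.

import numpy as np
import cv2

# A dummy 64x64 BGR image with random HSV jitter applied to it.
bgr = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
augmented = random_hsv_image(bgr, delta_hue=0.1, delta_sat_scale=0.5, delta_val_scale=0.5)
print(augmented.shape, augmented.dtype)  # (64, 64, 3) uint8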

# non maximum suppression
utils.py (project: chainer-object-detection, author: dsanno)
def reshape_to_yolo_size(img):
    input_width, input_height = img.size
    min_pixel = 320.0
    #max_pixel = 608
    max_pixel = 1024.0

    min_edge = np.minimum(input_width, input_height)
    if min_edge < min_pixel:
        input_width *= min_pixel / min_edge
        input_height *= min_pixel / min_edge
    max_edge = np.maximum(input_width, input_height)
    if max_edge > max_pixel:
        input_width *= max_pixel / max_edge
        input_height *= max_pixel / max_edge

    input_width = int(input_width / 32.0 + round(input_width % 32 / 32.0)) * 32
    input_height = int(input_height / 32.0 + round(input_height % 32 / 32.0)) * 32
    img = img.resize((input_width, input_height))

    return img
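A quick usage sketch for reshape_to_yolo_size with Pillow; the input size is arbitrary, and numpy must already be imported as np in the module that defines the function.

from PIL import Image

# A dummy 500x375 RGB image; both sides get snapped to multiples of 32.
img = Image.new('RGB', (500, 375))
resized = reshape_to_yolo_size(img)
print(resized.size)  # (512, 384) for this input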
test_umath.py (project: radar, author: amoose136)
def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.maximum.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            assert_equal(func(tmp1), np.nan)
            assert_equal(func(tmp2), np.nan)
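For context, a short sketch of the behaviour this test exercises: np.maximum.reduce propagates NaN, while np.fmax.reduce ignores it.

import numpy as np

a = np.array([1.0, np.nan, 3.0])
print(np.maximum.reduce(a))  # nan, because maximum propagates NaN
print(np.fmax.reduce(a))     # 3.0, because fmax ignores NaN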
test_umath.py (project: radar, author: amoose136)
def test_truth_table_logical(self):
        # 2, 3 and 4 serve as true values
        input1 = [0, 0, 3, 2]
        input2 = [0, 4, 0, 2]

        typecodes = (np.typecodes['AllFloat']
                     + np.typecodes['AllInteger']
                     + '?')     # boolean
        for dtype in map(np.dtype, typecodes):
            arg1 = np.asarray(input1, dtype=dtype)
            arg2 = np.asarray(input2, dtype=dtype)

            # OR
            out = [False, True, True, True]
            for func in (np.logical_or, np.maximum):
                assert_equal(func(arg1, arg2).astype(bool), out)
            # AND
            out = [False, False, False, True]
            for func in (np.logical_and, np.minimum):
                assert_equal(func(arg1, arg2).astype(bool), out)
            # XOR
            out = [False, True, True, False]
            for func in (np.logical_xor, np.not_equal):
                assert_equal(func(arg1, arg2).astype(bool), out)
test_core.py (project: radar, author: amoose136)
def test_minmax_func(self):
        # Tests minimum and maximum.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # max doesn't work if shaped
        xr = np.ravel(x)
        xmr = ravel(xm)
        # following are true because of careful selection of data
        assert_equal(max(xr), maximum(xmr))
        assert_equal(min(xr), minimum(xmr))

        assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
        assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
        x = arange(5)
        y = arange(5) - 2
        x[3] = masked
        y[0] = masked
        assert_equal(minimum(x, y), where(less(x, y), x, y))
        assert_equal(maximum(x, y), where(greater(x, y), x, y))
        assert_(minimum(x) == 0)
        assert_(maximum(x) == 4)

        x = arange(4).reshape(2, 2)
        x[-1, -1] = masked
        assert_equal(maximum(x), 2)
imgOp.py (project: LiviaNET, author: josedolz)
def applyPadding(inputImg, sampleSizes, receptiveField) : 
    receptiveField_arr = np.asarray(receptiveField, dtype="int16")
    inputImg_arr = np.asarray(inputImg.shape,dtype="int16")

    receptiveField = np.array(receptiveField, dtype="int16")

    left_padding = (receptiveField - 1) // 2  # integer division keeps the padding widths as ints
    right_padding = receptiveField - 1 - left_padding

    extra_padding = np.maximum(0, np.asarray(sampleSizes,dtype="int16")-(inputImg_arr+left_padding+right_padding))
    right_padding += extra_padding  

    paddingValues = ( (left_padding[0],right_padding[0]),
                      (left_padding[1],right_padding[1]),
                      (left_padding[2],right_padding[2]))

    paddedImage = lib.pad(inputImg, paddingValues, mode='reflect' )
    return [paddedImage, paddingValues]
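A usage sketch for applyPadding, under the assumption that lib in the snippet refers to numpy and that this code runs in the same module; the shapes below are made up.

import numpy as np

lib = np  # the original module appears to alias numpy this way

volume = np.zeros((25, 25, 25), dtype="int16")  # dummy 3-D input image
sample_sizes = np.array([27, 27, 27])
receptive_field = [9, 9, 9]

padded, padding = applyPadding(volume, sample_sizes, receptive_field)
print(padded.shape)  # (33, 33, 33) for this input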

# ----- Apply unpadding ---------
calibration_utils.py (project: introspective, author: numeristical)
def _natural_cubic_spline_basis_expansion(xpts,knots):
    num_knots = len(knots)
    num_pts = len(xpts)
    outmat = np.zeros((num_pts,num_knots))
    outmat[:,0]= np.ones(num_pts)
    outmat[:,1] = xpts
    def make_func_H(k):
        def make_func_d(k):
            def func_d(x):
                denom = knots[-1] - knots[k-1]
                numer = np.maximum(x-knots[k-1],np.zeros(len(x)))**3 - np.maximum(x-knots[-1],np.zeros(len(x)))**3
                return numer/denom
            return func_d
        def func_H(x):
            d_fun_k = make_func_d(k)
            d_fun_Km1 = make_func_d(num_knots-1)
            return d_fun_k(x) -  d_fun_Km1(x)
        return func_H
    for i in range(1,num_knots-1):
        curr_H_fun = make_func_H(i)
        outmat[:,i+1] = curr_H_fun(xpts)
    return outmat
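A brief usage sketch for _natural_cubic_spline_basis_expansion, assuming numpy is imported as np; the knot positions are arbitrary.

import numpy as np

xpts = np.linspace(0.0, 10.0, 50)
knots = np.array([1.0, 3.0, 5.0, 7.0, 9.0])

basis = _natural_cubic_spline_basis_expansion(xpts, knots)
print(basis.shape)  # (50, 5): intercept, linear term, and one column per interior basis function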
analyze_dets.py (project: blitznet, author: dvornikita)
def batch_iou(proposals, gt):
    bboxes = np.transpose(proposals).reshape((4, -1, 1))
    bboxes_x1 = bboxes[0]
    bboxes_x2 = bboxes[0]+bboxes[2]
    bboxes_y1 = bboxes[1]
    bboxes_y2 = bboxes[1]+bboxes[3]

    gt = np.transpose(gt).reshape((4, 1, -1))
    gt_x1 = gt[0]
    gt_x2 = gt[0]+gt[2]
    gt_y1 = gt[1]
    gt_y2 = gt[1]+gt[3]

    widths = np.maximum(0, np.minimum(bboxes_x2, gt_x2) -
                        np.maximum(bboxes_x1, gt_x1))
    heights = np.maximum(0, np.minimum(bboxes_y2, gt_y2) -
                         np.maximum(bboxes_y1, gt_y1))
    intersection = widths*heights
    union = bboxes[2]*bboxes[3] + gt[2]*gt[3] - intersection
    return (intersection / union)
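A small usage sketch for batch_iou, assuming numpy is imported as np and boxes are given as (x, y, w, h) rows; the coordinates are made up.

import numpy as np

proposals = np.array([[0.0, 0.0, 10.0, 10.0],
                      [5.0, 5.0, 10.0, 10.0]])
gt = np.array([[0.0, 0.0, 10.0, 10.0],
               [20.0, 20.0, 5.0, 5.0]])

iou = batch_iou(proposals, gt)
print(iou.shape)  # (2, 2): IoU of every proposal against every ground-truth box
print(iou[0, 0])  # 1.0, the first proposal matches the first ground truth exactly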
utils.py (project: blitznet, author: dvornikita)
def batch_iou(proposals, gt):
    bboxes = np.transpose(proposals).reshape((4, -1, 1))
    bboxes_x1 = bboxes[0]
    bboxes_x2 = bboxes[0]+bboxes[2]
    bboxes_y1 = bboxes[1]
    bboxes_y2 = bboxes[1]+bboxes[3]

    gt = np.transpose(gt).reshape((4, 1, -1))
    gt_x1 = gt[0]
    gt_x2 = gt[0]+gt[2]
    gt_y1 = gt[1]
    gt_y2 = gt[1]+gt[3]

    widths = np.maximum(0, np.minimum(bboxes_x2, gt_x2) -
                        np.maximum(bboxes_x1, gt_x1))
    heights = np.maximum(0, np.minimum(bboxes_y2, gt_y2) -
                         np.maximum(bboxes_y1, gt_y1))
    intersection = widths*heights
    union = bboxes[2]*bboxes[3] + gt[2]*gt[3] - intersection
    return (intersection / union)
utils.py (project: blitznet, author: dvornikita)
def decode_bboxes(tcoords, anchors):
    var_x, var_y, var_w, var_h = config['prior_variance']
    t_x = tcoords[:, 0]*var_x
    t_y = tcoords[:, 1]*var_y
    t_w = tcoords[:, 2]*var_w
    t_h = tcoords[:, 3]*var_h
    a_w = anchors[:, 2]
    a_h = anchors[:, 3]
    a_x = anchors[:, 0]+a_w/2
    a_y = anchors[:, 1]+a_h/2
    x = t_x*a_w + a_x
    y = t_y*a_h + a_y
    w = tf.exp(t_w)*a_w
    h = tf.exp(t_h)*a_h

    x1 = tf.maximum(0., x - w/2)
    y1 = tf.maximum(0., y - h/2)
    x2 = tf.minimum(1., w + x1)
    y2 = tf.minimum(1., h + y1)
    return tf.stack([y1, x1, y2, x2], axis=1)
base_sampler.py (project: HyperGAN, author: 255BITS)
def plot(self, image, filename, save_sample):
        """ Plot an image."""
        image = np.minimum(image, 1)
        image = np.maximum(image, -1)
        image = np.squeeze(image)
        # Scale to 0..255.
        imin, imax = image.min(), image.max()
        image = (image - imin) * 255. / (imax - imin) + .5
        image = image.astype(np.uint8)
        if save_sample:
            try:
                Image.fromarray(image).save(filename)
            except Exception as e:
                print("Warning: could not sample to ", filename, ".  Please check permissions and make sure the path exists")
                print(e)
        GlobalViewer.update(image)
common.py (project: HyperGAN, author: 255BITS)
def sample_output(self, val):
        vocabulary = self.get_vocabulary()
        if self.one_hot:
            vals = [ np.argmax(r) for r in val ]
            ox_val = [vocabulary[obj] for obj in list(vals)]
            string = "".join(ox_val)
            return string
        else:
            val = np.reshape(val, [-1])
            val *= len(vocabulary)/2.0
            val += len(vocabulary)/2.0
            val = np.round(val)

            val = np.maximum(0, val)
            val = np.minimum(len(vocabulary)-1, val)

            ox_val = [self.get_character(obj) for obj in list(val)]
            string = "".join(ox_val)
            return string
lang2vec.py (project: lang-reps, author: chaitanyamalaviya)
def get_union_sets(lang_codes, feature_set_str):
    feature_set_parts = feature_set_str.split("|")
    feature_names, feature_values = get_named_set(lang_codes, feature_set_parts[0])
    for feature_set_part in feature_set_parts[1:]:
        more_feature_names, more_feature_values = get_named_set(lang_codes, feature_set_part)
        if len(feature_names) != len(more_feature_names):
            print("ERROR: Cannot perform elementwise union on feature sets of different size")
            sys.exit(0)
        feature_values = np.maximum(feature_values, more_feature_values)
    return feature_names, feature_values
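The elementwise union in the snippet is just np.maximum over binary feature matrices; a minimal illustration, assuming numpy is imported as np.

import numpy as np

set_a = np.array([[1, 0, 0],
                  [0, 1, 0]])
set_b = np.array([[0, 0, 1],
                  [0, 1, 0]])
print(np.maximum(set_a, set_b))  # [[1 0 1] [0 1 0]]: a 1 wherever either set has a 1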
facenet.py (project: facerecognition, author: guoxiaolu)
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1/std_adj)
    return y
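A usage sketch for prewhiten, assuming numpy is imported as np; the np.maximum call floors the standard deviation so near-constant images do not blow up.

import numpy as np

img = np.random.randint(0, 256, size=(160, 160, 3)).astype(np.float32)
whitened = prewhiten(img)
print(whitened.mean(), whitened.std())  # roughly 0.0 and 1.0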
utils.py (project: spyking-circus, author: spyking-circus)
def get_precision(self):
        """Compute data precision matrix with the generative model.
        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.
        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision
voc_eval.py (project: squeezeDet-hand, author: fyhtea)
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
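A quick usage sketch for voc_ap, assuming numpy is imported as np; the recall/precision values are fabricated for illustration.

import numpy as np

rec = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
prec = np.array([1.0, 0.8, 0.6, 0.5, 0.4])

print(voc_ap(rec, prec))                      # area under the interpolated PR curve
print(voc_ap(rec, prec, use_07_metric=True))  # 11-point VOC07 approximation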
mklmm.py (project: MKLMM, author: omerwe)
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
        L = post['L']
        if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
        Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
        ns = diagKTestTest.shape[0]
        nperbatch = 5000
        nact = 0

        #allocate mem
        fmu = np.zeros(ns)  #column vector (of length ns) of predictive latent means
        fs2 = np.zeros(ns)  #column vector (of length ns) of predictive latent variances
        while (nact<(ns-1)):
            id = np.arange(nact, np.minimum(nact+nperbatch, ns))
            kss = diagKTestTest[id]     
            Ks = KtrainTest[:, id]
            if (len(post['alpha'].shape) == 1):
                try: Fmu = intercept[id] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu
            else:
                try: Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu.mean(axis=1)
            if Lchol:
                V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
                fs2[id] = kss - np.sum(V**2, axis=0)                       #predictive variances                        
            else:
                fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)           #predictive variances
            fs2[id] = np.maximum(fs2[id],0)  #remove numerical noise i.e. negative variances        
            nact = id[-1]    #set counter to index of last processed data point

        return fmu, fs2
kernels.py (project: MKLMM, author: omerwe)
def sq_dist(a, b=None):
    #mean-center for numerical stability
    D, n = a.shape[0], a.shape[1]
    if (b is None):
        mu = a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b = a
        m = n
        aSq = np.sum(a**2, axis=0)
        bSq = aSq
    else:
        d, m = b.shape[0], b.shape[1]
        if (d != D): raise Exception('column lengths must agree')
        mu = (float(m)/float(m+n))*b.mean(axis=1) + (float(n)/float(m+n))*a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b -= mu[:, np.newaxis]      
        aSq = np.sum(a**2, axis=0)
        bSq = np.sum(b**2, axis=0)

    C = np.tile(np.column_stack(aSq).T, (1, m)) + np.tile(bSq, (n, 1)) - 2*a.T.dot(b)
    C = np.maximum(C, 0)    #remove numerical noise
    return C
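A usage sketch for sq_dist, assuming numpy is imported as np; note the function centers its inputs in place, so pass copies if the originals matter.

import numpy as np

a = np.random.randn(3, 5)  # columns are 3-dimensional points, 5 of them
b = np.random.randn(3, 4)

C = sq_dist(a.copy(), b.copy())
print(C.shape)         # (5, 4): squared distance between every column of a and every column of b
print((C >= 0).all())  # True, negative numerical noise is clipped by np.maximum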

#evaluate 'power sums' of the individual terms in Z
kernels.py (project: MKLMM, author: omerwe)
def __init__(self, X):
        Kernel.__init__(self)
        self.X_scaled = X/np.sqrt(X.shape[1])
        if (X.shape[1] >= X.shape[0] or True): self.K_sq = sq_dist(self.X_scaled.T)
        else: self.K_sq = None
        self.j = np.floor(X.shape[1]/2.0)+self.v+1
        self.pp = lambda r,j,v,f:  np.maximum(1-r, 0)**(j+v) * self.f(r,j)
        self.dpp = lambda r,j,v,f: np.maximum(1-r, 0)**(j+v-1) * r * ((j+v)*f(r,j) - np.maximum(1-r,0)*self.df(r,j))

