Python minimum() usage examples

The snippets below show how open-source projects use numpy.minimum(); each entry names the source file, project, and author.

base_sampler.py — project: HyperGAN, author: 255BITS
def plot(self, image, filename, save_sample):
        """ Plot an image."""
        image = np.minimum(image, 1)
        image = np.maximum(image, -1)
        image = np.squeeze(image)
        # Scale to 0..255.
        imin, imax = image.min(), image.max()
        image = (image - imin) * 255. / (imax - imin) + .5
        image = image.astype(np.uint8)
        if save_sample:
            try:
                Image.fromarray(image).save(filename)
            except Exception as e:
                print("Warning: could not sample to ", filename, ".  Please check permissions and make sure the path exists")
                print(e)
        GlobalViewer.update(image)
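The np.minimum/np.maximum pair above clamps every pixel into [-1, 1] before rescaling; the same elementwise clamp can be written with np.clip. A minimal standalone sketch (not from the HyperGAN source):

import numpy as np

image = np.array([-1.7, -0.3, 0.8, 2.4])
clamped = np.maximum(np.minimum(image, 1), -1)  # clamp elementwise to [-1, 1]
assert np.array_equal(clamped, np.clip(image, -1, 1))

The same minimum/maximum clamp reappears below, e.g. in magenta's preprocess().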
common.py — project: HyperGAN, author: 255BITS
def sample_output(self, val):
        vocabulary = self.get_vocabulary()
        if self.one_hot:
            vals = [ np.argmax(r) for r in val ]
            ox_val = [vocabulary[obj] for obj in list(vals)]
            string = "".join(ox_val)
            return string
        else:
            val = np.reshape(val, [-1])
            val *= len(vocabulary)/2.0
            val += len(vocabulary)/2.0
            val = np.round(val)

            val = np.maximum(0, val)
            val = np.minimum(len(vocabulary)-1, val)

            ox_val = [self.get_character(obj) for obj in list(val)]
            string = "".join(ox_val)
            return string
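In the else branch, real-valued outputs around [-1, 1] are rescaled to vocabulary positions, rounded, and then clamped into the valid index range [0, len(vocabulary) - 1] with np.maximum/np.minimum. A standalone sketch of that decoding step (the vocabulary here is invented for illustration):

import numpy as np

vocabulary = list("abcdefgh")            # hypothetical 8-symbol vocabulary
val = np.array([-1.0, -0.2, 0.3, 2.0])   # raw outputs; the last is out of range
val = np.round(val * len(vocabulary) / 2.0 + len(vocabulary) / 2.0)
val = np.minimum(len(vocabulary) - 1, np.maximum(0, val)).astype(int)
print("".join(vocabulary[i] for i in val))  # -> "adfh"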
adver.py — project: facejack, author: PetarV-
def iterate(self, x, eps=32, alp=1.0):
        num_iter = int(min(eps + 4, 1.25 * eps))  # iteration budget min(eps + 4, 1.25 * eps), as in Kurakin et al.
        loss = 1.0
        x = np.copy(x)
        while loss > 0 and num_iter > 0:
            inp = x.reshape((1,) + inp_size)
            outs = self.f_outputs([inp, 0])
            loss = outs[0]
            print('Loss: ', loss)
            grads = np.array(outs[1:]).reshape(inp_size)
            s_grads = np.sign(grads)
            adv_x = x - alp * s_grads
            sub_x = np.minimum(x + eps, np.maximum(x - eps, adv_x))
            next_x = preprocess_img(np.clip(deprocess_img(sub_x), 0.0, 255.0))
            x = next_x
            confidence = self.mdl.predict(x.reshape((1,) + inp_size))[0][0]
            print('Current confidence value: ', confidence)
            yield (deprocess_img(x), confidence)
            num_iter -= 1
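The line np.minimum(x + eps, np.maximum(x - eps, adv_x)) projects the perturbed image back into the L-infinity ball of radius eps around the original input, the projection step of iterative FGSM. A standalone sketch of just that step:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 255, size=(4, 4))             # original image
adv_x = x + rng.uniform(-50, 50, size=x.shape)   # freely perturbed image
eps = 32
projected = np.minimum(x + eps, np.maximum(x - eps, adv_x))
assert np.all(np.abs(projected - x) <= eps)      # back inside the eps-ball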
speech_utils.py — project: speech_feature_extractor, author: ZhihaoDU
def calc_stoi_from_spec(clean_spec, degraded_spec, analysis_len=30):
    freq_bins = np.size(clean_spec, 0)
    frames = np.size(clean_spec, 1)
    segments = frames - analysis_len + 1
    x = np.zeros((freq_bins, segments, analysis_len), dtype=np.float32)
    y = np.zeros((freq_bins, segments, analysis_len), dtype=np.float32)
    for j in range(0, freq_bins):
        for m in range(analysis_len - 1, frames, 1):
            seg = m - analysis_len + 1  # segment index; indexing with m would overrun the second axis
            x[j, seg] = clean_spec[j, m - analysis_len + 1:m + 1]
            y[j, seg] = degraded_spec[j, m - analysis_len + 1:m + 1]
            y[j, seg] = np.minimum(np.linalg.norm(x[j, seg, :]) / np.linalg.norm(y[j, seg, :]) * y[j, seg, :],
                                   (1. + np.power(10., 15. / 20.)) * x[j, seg, :])  # y is normalized and clipped
    x_mean = np.mean(x, axis=(0, 1))
    y_mean = np.mean(y, axis=(0, 1))
    score = 0.
    for j in range(0, freq_bins):
        for m in range(segments):
            score += np.dot(x[j, m, :] - x_mean, y[j, m, :] - y_mean) / \
                     (np.linalg.norm(x[j, m, :] - x_mean) * np.linalg.norm(y[j, m, :] - y_mean))
    score /= (freq_bins * analysis_len)
    return score
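Inside the loop, each degraded segment is rescaled to the clean segment's energy and then clipped: np.minimum caps it at (1 + 10**(-beta/20)) times the clean segment, where beta = -15 dB is the standard STOI lower SDR bound (hence the 10**(15/20) factor). A standalone sketch of the normalize-and-clip step for one segment, on synthetic data:

import numpy as np

beta = -15.0                        # lower SDR bound in dB
x_seg = np.random.rand(30)          # clean segment
y_seg = np.random.rand(30) * 0.1    # degraded segment
y_norm = np.linalg.norm(x_seg) / np.linalg.norm(y_seg) * y_seg   # match energies
y_clip = np.minimum(y_norm, (1 + 10 ** (-beta / 20)) * x_seg)    # clip per STOI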
feature_extractor.py — project: speech_feature_extractor, author: ZhihaoDU
def get_fft_mel_mat(nfft, sr=8000, nfilts=None, width=1.0, minfrq=20, maxfrq=None, constamp=0):
    if nfilts is None:
        nfilts = nfft
    if maxfrq is None:
        maxfrq = sr // 2
    wts = np.zeros((nfilts, nfft//2+1))
    fftfrqs = np.arange(0, nfft//2+1) / (1. * nfft) * (sr)
    minmel = hz2mel(minfrq)
    maxmel = hz2mel(maxfrq)
    binfrqs = mel2hz(minmel + np.arange(0, nfilts+2) / (nfilts+1.) * (maxmel - minmel))
    # binbin = np.round(binfrqs / maxfrq * nfft)
    for i in range(nfilts):
        fs = binfrqs[[i+0, i+1, i+2]]
        fs = fs[1] + width * (fs - fs[1])
        loslope = (fftfrqs - fs[0]) / (fs[1] - fs[0])
        hislope = (fs[2] - fftfrqs) / (fs[2] - fs[1])
        wts[i, :] = np.maximum(0, np.minimum(loslope, hislope))
    return wts
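np.maximum(0, np.minimum(loslope, hislope)) is the usual one-liner for a triangular filter: take the lower of the rising and falling slopes, then cut everything below zero. A small standalone demonstration on a plain frequency axis (values invented for illustration):

import numpy as np

freqs = np.linspace(0, 100, 11)           # hypothetical frequency axis
f_lo, f_center, f_hi = 20.0, 50.0, 80.0   # triangle corner frequencies
loslope = (freqs - f_lo) / (f_center - f_lo)
hislope = (f_hi - freqs) / (f_hi - f_center)
tri = np.maximum(0, np.minimum(loslope, hislope))
print(tri)  # zero outside [20, 80], peaking at 1 near 50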
mfcc_extractor.py — project: speech_feature_extractor, author: ZhihaoDU
(Same implementation as get_fft_mel_mat in the feature_extractor.py entry above.)
utils.py — project: magenta, author: tensorflow
def preprocess(self, strokes):
    """Remove entries from strokes having > max_seq_length points."""
    raw_data = []
    seq_len = []
    count_data = 0

    for i in range(len(strokes)):
      data = strokes[i]
      if len(data) <= (self.max_seq_length):
        count_data += 1
        # removes large gaps from the data
        data = np.minimum(data, self.limit)
        data = np.maximum(data, -self.limit)
        data = np.array(data, dtype=np.float32)
        data[:, 0:2] /= self.scale_factor
        raw_data.append(data)
        seq_len.append(len(data))
    seq_len = np.array(seq_len)  # nstrokes for each sketch
    idx = np.argsort(seq_len)
    self.strokes = []
    for i in range(len(seq_len)):
      self.strokes.append(raw_data[idx[i]])
    print("total images <= max_seq_len is %d" % count_data)
    self.num_batches = int(count_data / self.batch_size)
eroder.py — project: bpy_lambda, author: bcongdon
def flow(self, Kc, Ks, Kz, Ka, numexpr):
        zeros = np.zeros
        where = np.where
        min = np.minimum
        max = np.maximum
        abs = np.absolute
        arctan = np.arctan
        sin = np.sin

        center = (slice(   1,   -1,None),slice(   1,  -1,None))
        rock = self.center
        ds = self.scour[center]    
        rcc = rock[center] - ds * Kz
        # there isn't really a bottom to the rock, but negative values look ugly
        rock[center] = where(rcc < 0, 0, rcc)
two_room_domain.py — project: rl_algorithms, author: DanielTakeshi
def _init_grid(self):
        """ Initializes the grid. Currently works best for multiples of 3 which
        are also odd. For now let's only test on 9x9 grids. """

        self.grid.fill(OPEN)
        w1 = np.maximum(self.length // 3, 1)   # floor division keeps the indices integral on Python 3
        w2 = np.minimum(2 * (self.length // 3), self.length)
        self.grid[:, w1:w2].fill(WALL)
        self.grid[self.length // 2, :].fill(OPEN)

        sx = np.random.randint(0, self.length)
        sy = np.random.randint(0, w1)
        gx = np.random.randint(0, self.length)
        gy = np.random.randint(w2, self.length)
        s_agent = (sx,sy)
        s_goal = (gx,gy)

        assert s_agent != s_goal
        assert self.grid[s_agent] != WALL
        assert self.grid[s_goal] != WALL
        self.grid[s_agent] = AGENT
        self.grid[s_goal] = GOAL
        s_start = s_agent
        return s_start, s_agent, s_goal
utils.py — project: NMT, author: tuzhaopeng
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        vals_norm = numpy.sqrt((new_vals**2).sum())
        new_vals = scale*new_vals/vals_norm
        values[dx, perm[:sparsity]] = new_vals
    _,v,_ = numpy.linalg.svd(values)
    values = scale * values/v[0]
    return values.astype(theano.config.floatX)
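Here numpy.minimum caps the requested sparsity at the row width sizeY, and dividing by v[0] rescales the matrix so its largest singular value equals scale. A numpy-only sketch of that spectral rescaling, without the Theano dependency:

import numpy as np

rng = np.random.default_rng(0)
scale = 1.0
values = rng.uniform(-scale, scale, size=(64, 64))
_, s, _ = np.linalg.svd(values)
values = scale * values / s[0]       # force the largest singular value to scale
print(np.linalg.svd(values)[1][0])   # ~1.0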
utils.py — project: NMT, author: tuzhaopeng
def sample_weights_orth(sizeX, sizeY, sparsity, scale, rng):
    sizeX = int(sizeX)
    sizeY = int(sizeY)

    assert sizeX == sizeY, 'for orthogonal init, sizeX == sizeY'

    if sparsity < 0:
        sparsity = sizeY
    else:
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals

    u,s,v = numpy.linalg.svd(values)
    values = u * scale

    return values.astype(theano.config.floatX)
utils.py — project: NMT, author: tuzhaopeng
(Same implementation as sample_weights in the first NMT utils.py entry above.)
mixer_calibration.py — project: Auspex, author: BBN-Q
def find_null_offset(xpts, powers, default=0.0):
    """Finds the offset corresponding to the minimum power using a fit to the measured data"""
    def model(x, a, b, c):
        return a*(x - b)**2 + c
    powers = np.power(10, powers/10.)
    min_idx = np.argmin(powers)
    try:
        fit = curve_fit(model, xpts, powers, p0=[1, xpts[min_idx], powers[min_idx]])
    except RuntimeError:
        logger.warning("Mixer null offset fit failed.")
        return default, xpts, np.zeros(len(xpts))  # three values, matching the successful return
    best_offset = np.real(fit[0][1])
    best_offset = np.minimum(best_offset, xpts[-1])
    best_offset = np.maximum(best_offset, xpts[0])
    xpts_fine = np.linspace(xpts[0],xpts[-1],101)
    fit_pts = np.array([np.real(model(x, *fit[0])) for x in xpts_fine])
    if min(fit_pts) < 0:
        fit_pts -= min(fit_pts) - 1e-10  # prevent taking the log of a negative number
    return best_offset, xpts_fine, 10*np.log10(fit_pts)
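np.minimum and np.maximum clamp the fitted parabola vertex into the measured sweep range [xpts[0], xpts[-1]], so a poor fit cannot report an offset outside the data. A standalone usage sketch with synthetic data (the sweep values are invented for illustration):

import numpy as np
from scipy.optimize import curve_fit

def model(x, a, b, c):
    return a * (x - b) ** 2 + c

xpts = np.linspace(-0.1, 0.1, 21)
powers = model(xpts, 2.0, 0.03, 1e-4)    # synthetic power sweep, linear units
fit, _ = curve_fit(model, xpts, powers, p0=[1, xpts[np.argmin(powers)], powers.min()])
best_offset = np.maximum(np.minimum(fit[1], xpts[-1]), xpts[0])
print(best_offset)  # ~0.03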
test.py — project: py-faster-rcnn-resnet-imagenet, author: tianzhi0549
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
gaussian_process.py — project: probabilistic_line_search, author: ProbabilisticNumerics
def quadratic_polynomial_coefficients(self, t):
    """The posterior mean ``mu`` of this GP is piece-wise cubic. Return the
    coefficients of the **quadratic** polynomial that is the **derivative** of
    ``mu`` at ``t``.

    This is used to find the minimum of the cubic polynomial in
    ``gp.find_mimima()``."""

    assert isinstance(t, (float, np.float32, np.float64))
    assert t not in self.ts # at the observations, polynomial is ambiguous

    d1, d2, d3 = self.dmu(t), self.d2mu(t), self.d3mu(t)
    a = .5*d3
    b = d2 - d3*t
    c = d1 - d2*t + 0.5*d3*t**2

    return (a, b, c)
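To check the algebra: if mu is locally the cubic mu(x) = c3*x**3 + c2*x**2 + c1*x + c0, then mu'(x) = a*x**2 + b*x + c with exactly the a, b, c computed above (a Taylor expansion of mu' around t, re-collected in powers of x). A standalone numeric verification on a known cubic:

import numpy as np

# mu(x) = 2x^3 - 3x^2 + 5x + 1, so mu'(x) = 6x^2 - 6x + 5
t = 0.7
d1 = 6 * t**2 - 6 * t + 5   # mu'(t)
d2 = 12 * t - 6             # mu''(t)
d3 = 12.0                   # mu'''(t)
a, b, c = 0.5 * d3, d2 - d3 * t, d1 - d2 * t + 0.5 * d3 * t**2
assert np.allclose([a, b, c], [6, -6, 5])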
models_test.py — project: conv_seq2seq, author: tobyyouup
def test_train(self):
    model, fetches_ = self._test_pipeline(tf.contrib.learn.ModeKeys.TRAIN)
    predictions_, loss_, _ = fetches_

    target_len = self.sequence_length + 10 + 2
    max_decode_length = model.params["target.max_seq_len"]
    expected_decode_len = np.minimum(target_len, max_decode_length)

    np.testing.assert_array_equal(predictions_["logits"].shape, [
        self.batch_size, expected_decode_len - 1,
        model.target_vocab_info.total_size
    ])
    np.testing.assert_array_equal(predictions_["losses"].shape,
                                  [self.batch_size, expected_decode_len - 1])
    np.testing.assert_array_equal(predictions_["predicted_ids"].shape,
                                  [self.batch_size, expected_decode_len - 1])
    self.assertFalse(np.isnan(loss_))
trainer.py — project: StackGAN, author: hanzhanggit
def eval_one_dataset(self, sess, dataset, save_dir, subset='train'):
        count = 0
        print('num_examples:', dataset._num_examples)
        while count < dataset._num_examples:
            start = count % dataset._num_examples
            images, embeddings_batchs, filenames, _ =\
                dataset.next_batch_test(self.batch_size, start, 1)
            print('count = ', count, 'start = ', start)
            for i in range(len(embeddings_batchs)):
                samples_batchs = []
                # Generate up to 16 images for each sentence,
                # with randomness from noise z and conditioning augmentation.
                for j in range(np.minimum(16, cfg.TRAIN.NUM_COPY)):
                    samples = sess.run(self.fake_images,
                                       {self.embeddings: embeddings_batchs[i]})
                    samples_batchs.append(samples)
                self.save_super_images(images, samples_batchs,
                                       filenames, i, save_dir,
                                       subset)

            count += self.batch_size
utils.py — project: StackGAN, author: hanzhanggit
def custom_crop(img, bbox):
    # bbox = [x-left, y-top, width, height]
    imsiz = img.shape  # [height, width, channel]
    # if box[0] + box[2] >= imsiz[1] or\
    #     box[1] + box[3] >= imsiz[0] or\
    #     box[0] <= 0 or\
    #     box[1] <= 0:
    #     box[0] = np.maximum(0, box[0])
    #     box[1] = np.maximum(0, box[1])
    #     box[2] = np.minimum(imsiz[1] - box[0] - 1, box[2])
    #     box[3] = np.minimum(imsiz[0] - box[1] - 1, box[3])
    center_x = int((2 * bbox[0] + bbox[2]) / 2)
    center_y = int((2 * bbox[1] + bbox[3]) / 2)
    R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
    y1 = np.maximum(0, center_y - R)
    y2 = np.minimum(imsiz[0], center_y + R)
    x1 = np.maximum(0, center_x - R)
    x2 = np.minimum(imsiz[1], center_x + R)
    img_cropped = img[y1:y2, x1:x2, :]
    return img_cropped
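custom_crop takes a square of side 2R around the bbox center and clamps its corners to the image bounds with np.maximum/np.minimum, so the slice never runs off the edge. A standalone sketch of the clamped-window idiom:

import numpy as np

img = np.zeros((100, 120, 3))        # height, width, channels
center_x, center_y, R = 10, 90, 40   # window overflows the left and bottom edges
y1, y2 = np.maximum(0, center_y - R), np.minimum(img.shape[0], center_y + R)
x1, x2 = np.maximum(0, center_x - R), np.minimum(img.shape[1], center_x + R)
print(img[y1:y2, x1:x2].shape)       # (50, 50, 3): clipped at the borders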
test.py — project: RON, author: taokong
def vis_detections(im, class_name, dets, thresh=0.5):
    """Visual debugging of detections."""

    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(5, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]

        if score > thresh:

            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
mklmm.py — project: MKLMM, author: omerwe
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
        L = post['L']
        if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
        Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
        ns = diagKTestTest.shape[0]
        nperbatch = 5000
        nact = 0

        #allocate mem
        fmu = np.zeros(ns)  #column vector (of length ns) of predictive latent means
        fs2 = np.zeros(ns)  #column vector (of length ns) of predictive latent variances
        while nact < ns:  # iterate until every test point has been processed
            id = np.arange(nact, np.minimum(nact+nperbatch, ns))
            kss = diagKTestTest[id]     
            Ks = KtrainTest[:, id]
            if (len(post['alpha'].shape) == 1):
                try: Fmu = intercept[id] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu
            else:
                try: Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu.mean(axis=1)
            if Lchol:
                V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
                fs2[id] = kss - np.sum(V**2, axis=0)                       #predictive variances                        
            else:
                fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)           #predictive variances
            fs2[id] = np.maximum(fs2[id],0)  #remove numerical noise i.e. negative variances        
            nact = id[-1] + 1  # advance past the last processed point so none is handled twice

        return fmu, fs2
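The batching pattern np.arange(nact, np.minimum(nact + nperbatch, ns)) walks over ns points in fixed-size chunks, with np.minimum trimming the final partial batch. A minimal standalone sketch of the same loop structure:

import numpy as np

ns, nperbatch = 12, 5
nact = 0
while nact < ns:
    idx = np.arange(nact, np.minimum(nact + nperbatch, ns))
    print(idx)            # [0..4], [5..9], [10 11]
    nact = idx[-1] + 1    # advance past the last processed point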

