Python numpy.amax() example source code
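
Before the project snippets below, here is a minimal, self-contained sketch of what numpy.amax does; the array values are invented purely for illustration:

import numpy as np

a = np.array([[1, 5, 2],
              [7, 0, 3]])

print(np.amax(a))           # 7  (maximum over the flattened array)
print(np.amax(a, axis=0))   # [7 5 3]  (column-wise maxima)
print(np.amax(a, axis=1))   # [5 7]  (row-wise maxima)

np.amax is an alias for np.max; the snippets below use it to take maxima over whole arrays, along a given axis, or over running values.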

utils.py source code (project: pyku, author: dubvulture)
def min_side(_, pos):
    """
    Given an object's pixel positions, return the minimum side length of its
    bounding box.
    :param _: pixel values (unused)
    :param pos: pixel positions (1-D)
    :return: minimum bounding box side length
    """
    xs = np.array([i / SSIZE for i in pos])
    ys = np.array([i % SSIZE for i in pos])
    minx = np.amin(xs)
    miny = np.amin(ys)
    maxx = np.amax(xs)
    maxy = np.amax(ys)
    ct1 = compute_line(np.array([minx, miny]), np.array([minx, maxy]))
    ct2 = compute_line(np.array([minx, miny]), np.array([maxx, miny]))
    return min(ct1, ct2)
plot.py source code (project: spyking-circus, author: spyking-circus)
def view_trigger_snippets_bis(trigger_snippets, elec_index, save=None):
    fig = pylab.figure()
    ax = fig.add_subplot(1, 1, 1)
    for n in xrange(0, trigger_snippets.shape[2]):
        y = trigger_snippets[:, elec_index, n]
        x = numpy.arange(- (y.size - 1) / 2, (y.size - 1) / 2 + 1)
        b = 0.5 + 0.5 * numpy.random.rand()
        ax.plot(x, y, color=(0.0, 0.0, b), linestyle='solid')
    ax.grid(True)
    ax.set_xlim([numpy.amin(x), numpy.amax(x)])
    ax.set_xlabel("time")
    ax.set_ylabel("amplitude")
    if save is None:
        pylab.show()
    else:
        pylab.savefig(save)
        pylab.close(fig)
    return
convert_data.py source code (project: aapm_thoracic_challenge, author: xf4j)
def get_labels(contours, shape, slices):
    z = [np.around(s.ImagePositionPatient[2], 1) for s in slices]
    pos_r = slices[0].ImagePositionPatient[1]
    spacing_r = slices[0].PixelSpacing[1]
    pos_c = slices[0].ImagePositionPatient[0]
    spacing_c = slices[0].PixelSpacing[0]

    label_map = np.zeros(shape, dtype=np.float32)
    for con in contours:
        num = ROI_ORDER.index(con['name']) + 1
        for c in con['contours']:
            nodes = np.array(c).reshape((-1, 3))
            assert np.amax(np.abs(np.diff(nodes[:, 2]))) == 0
            z_index = z.index(np.around(nodes[0, 2], 1))
            r = (nodes[:, 1] - pos_r) / spacing_r
            c = (nodes[:, 0] - pos_c) / spacing_c
            rr, cc = polygon(r, c)
            label_map[z_index, rr, cc] = num

    return label_map
util.py source code (project: lung-cancer-detector, author: YichenGong)
def to_rgb(img):
    """
    Converts the given array into an RGB image. If the number of channels is not
    3, the array is tiled so that it has 3 channels. Finally, the values are
    rescaled to [0,255).

    :param img: the array to convert [nx, ny, channels]

    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)

    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img
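
A quick shape-check sketch for the to_rgb helper above; the random single-channel input is invented for illustration and assumes numpy is imported as np:

gray = np.random.rand(64, 64).astype(np.float32)  # hypothetical single-channel image
rgb = to_rgb(gray)
print(rgb.shape)                    # (64, 64, 3)
print(np.amin(rgb), np.amax(rgb))   # 0.0 255.0 after rescaling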
wordemb-vis-tsne.py source code (project: nn4nlp-code, author: neubig)
def display_data(word_vectors, words, target_words=None):
  target_matrix = word_vectors.copy()
  if target_words:
    target_words = [line.strip().lower() for line in open(target_words)][:2000]
    rows = [words.index(word) for word in target_words if word in words]
    target_matrix = target_matrix[rows,:]
  else:
    rows = np.random.choice(len(word_vectors), size=1000, replace=False)
    target_matrix = target_matrix[rows,:]
  reduced_matrix = tsne(target_matrix, 2);

  Plot.figure(figsize=(200, 200), dpi=100)
  max_x = np.amax(reduced_matrix, axis=0)[0]
  max_y = np.amax(reduced_matrix, axis=0)[1]
  Plot.xlim((-max_x,max_x))
  Plot.ylim((-max_y,max_y))

  Plot.scatter(reduced_matrix[:, 0], reduced_matrix[:, 1], 20);

  for row_id in range(0, len(rows)):
      target_word = words[rows[row_id]]
      x = reduced_matrix[row_id, 0]
      y = reduced_matrix[row_id, 1]
      Plot.annotate(target_word, (x,y))
  Plot.savefig("word_vectors.png");
dataset.py source code (project: untwist, author: IoSR-Surrey)
def add(self, x, y = None):
        self.X =  np.memmap(
            self.path+"/X.npy", self.X.dtype,
            shape = (self.nrows + x.shape[0] , x.shape[1])
        )
        self.X[self.nrows:self.nrows + x.shape[0],:] = x

        if y is not None:
            if x.shape != y.shape: raise ValueError("x and y should have the same shape")
            self.Y = np.memmap(
                self.path+"/Y.npy", self.Y.dtype,
                shape = (self.nrows + y.shape[0] , y.shape[1])
            )
            self.Y[self.nrows:self.nrows + y.shape[0],:] = y

        delta = x - self.running_mean
        n = self.X.shape[0] + np.arange(x.shape[0]) + 1
        self.running_dev += np.sum(delta * (x - self.running_mean), 0)
        self.running_mean += np.sum(delta / n[:, np.newaxis], 0)
        self.running_max  = np.amax(np.vstack((self.running_max, x)), 0)
        self.running_min  = np.amin(np.vstack((self.running_min, x)), 0)
        self.nrows += x.shape[0]
camera_intrinsics_estimation.py source code (project: esys-pbi, author: fsxfreak)
def _make_grid(dim=(11,4)):
    """
    This function generates the structure for an asymmetrical circle grid.
    Domain: (0-1)
    """
    x,y = range(dim[0]),range(dim[1])
    p = np.array([[[s,i] for s in x] for i in y], dtype=np.float32)
    p[:,1::2,1] += 0.5
    p = np.reshape(p, (-1,2), 'F')

    # scale height = 1
    x_scale =  1./(np.amax(p[:,0])-np.amin(p[:,0]))
    y_scale =  1./(np.amax(p[:,1])-np.amin(p[:,1]))

    p *= x_scale, x_scale / .5

    return p
_act.py source code (project: skutil, author: tgsmith61591)
def _compute_stats(self, pred, expo, loss, prem):
        n_samples, n_groups = pred.shape[0], self.n_groups
        pred_ser = pd.Series(pred)
        loss_to_returns = np.sum(loss) / np.sum(prem)

        rank = pd.qcut(pred_ser, n_groups, labels=False)
        n_groups = np.amax(rank) + 1
        groups = np.arange(n_groups)  # if we ever go back to using n_groups...

        tab = pd.DataFrame({
            'rank': rank,
            'pred': pred,
            'prem': prem,
            'loss': loss,
            'expo': expo
        })

        grouped = tab[['rank', 'pred', 'prem', 'loss', 'expo']].groupby('rank')
        agg_rlr = (grouped['loss'].agg(np.sum) / grouped['prem'].agg(np.sum)) / loss_to_returns

        return tab, agg_rlr, n_groups
q-network.py source code (project: CartPole-v0, author: hmtai6)
def reply(self):        
        batch = self.memory.sample(nbReplay)

        states = np.array([ o[0] for o in batch ])
        states_ = np.array([ (nbReplay if o[3] is None else o[3]) for o in batch ])

        p = agent.brain.predict(states)
        p_ = agent.brain.predict(states_)

        x = np.zeros((nbReplay, self.stateCnt))
        y = np.zeros((nbReplay, self.actionCnt))     

        for i in range(nbReplay):
            o = batch[i]
            s = o[0]; a = o[1]; r = o[2]; s_ = o[3]

            t = p[i]
            if s_ is None:
                t[a] = r
            else:
                t[a] = r + td_discount_rate * np.amax(p_[i])

            x[i] = s
            y[i] = t
knn.py source code (project: nimo, author: wolfram2012)
def __call__(self, points, data):
        _,c = data.shape

        dist_mat = []
        if self.p == np.inf:
            for pt in points:
                pt = pt.reshape(1,c)
                row =  np.amax(np.abs(data - pt),axis=-1)
                dist_mat.append(row)
        else:
            for pt in points:
                pt = pt.reshape(1,c)
                row = np.sum(np.abs(data - pt)**self.p,axis=1)**(1.0/self.p)
                dist_mat.append(row)

        return np.array(dist_mat)
knn.py source code (project: nimo, author: wolfram2012)
def __call__(self, points, data):
        _,c = data.shape

        dist_mat = []
        if self.p == np.inf:
            for pt in points:
                pt = pt.reshape(1,c)
                raw_dist = self.scale*(data - pt)
                robust_dist = raw_dist/np.sqrt(1+raw_dist**2) # sigmoid - locally linear
                row =  np.amax(np.abs(robust_dist),axis=-1)
                dist_mat.append(row)
        else:
            for pt in points:
                pt = pt.reshape(1,c)
                raw_dist = self.scale*(data - pt)
                robust_dist = raw_dist/np.sqrt(1+raw_dist**2) # sigmoid - locally linear
                row = np.sum(np.abs(robust_dist)**self.p,axis=1)**(1.0/self.p)
                dist_mat.append(row)

        return np.array(dist_mat)
cluster.py source code (project: uncover-ml, author: GeoscienceAustralia)
def compute_n_classes(classes, config):
    """The number of cluster centres to use for K-means

    Just handles the case where someone specifies k=5 but labels 10 classes
    in the training data. This will return k=10.

    Parameters
    ----------
    classes : ndarray
        an array of hard class assignments given as training data
    config : Config
        The app config class holding the number of classes asked for

    Returns
    -------
    k : int > 0
        The max of k and the number of classes referenced in the training data
    """
    k = mpiops.comm.allreduce(np.amax(classes), op=mpiops.MPI.MAX)
    k = int(max(k, config.n_classes))
    return k
multi_input.py source code (project: aboleth, author: data61)
def input_fn(df):
    """Format the downloaded data."""
    # Collect the continuous feature columns (by name k) and stack them
    # into a single float32 matrix.
    continuous_cols = [df[k].values for k in CONTINUOUS_COLUMNS]
    X_con = np.stack(continuous_cols).astype(np.float32).T

    # Standardise
    X_con -= X_con.mean(axis=0)
    X_con /= X_con.std(axis=0)

    # Encode each categorical feature column as integer category indices.
    categ_cols = [np.where(pd.get_dummies(df[k]).values)[1][:, np.newaxis]
                  for k in CATEGORICAL_COLUMNS]
    n_values = [np.amax(c) + 1 for c in categ_cols]
    X_cat = np.concatenate(categ_cols, axis=1).astype(np.int32)

    # Extract the label column as a column vector.
    label = df[LABEL_COLUMN].values[:, np.newaxis]

    # Returns the feature columns and the label.
    return X_con, X_cat, n_values, label
tasks.py source code (project: fexum, author: KDD-OpenSource)
def calculate_feature_statistics(feature_id):
    feature = Feature.objects.get(pk=feature_id)

    dataframe = _get_dataframe(feature.dataset.id)
    feature_col = dataframe[feature.name]

    feature.min = np.amin(feature_col).item()
    feature.max = np.amax(feature_col).item()
    feature.mean = np.mean(feature_col).item()
    feature.variance = np.nanvar(feature_col).item()
    unique_values = np.unique(feature_col)
    integer_check = (np.mod(unique_values, 1) == 0).all()
    feature.is_categorical = integer_check and (unique_values.size < 10)
    if feature.is_categorical:
        feature.categories = list(unique_values)
    feature.save(update_fields=['min', 'max', 'variance', 'mean', 'is_categorical', 'categories'])

    del unique_values, feature
utils.py source code (project: pyglmnet, author: glm-tools)
def softmax(w):
    """Softmax function of given array of number w.

    Parameters
    ----------
    w: array | list
        The array of numbers.

    Returns
    -------
    dist: array
        The resulting array with values ranging from 0 to 1.
    """
    w = np.array(w)
    maxes = np.amax(w, axis=1)
    maxes = maxes.reshape(maxes.shape[0], 1)
    e = np.exp(w - maxes)
    dist = e / np.sum(e, axis=1, keepdims=True)
    return dist
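
A brief usage sketch of the softmax helper above; the input numbers are invented for illustration and assume numpy is imported as np:

w = [[1.0, 2.0, 3.0],
     [1.0, 1.0, 1.0]]
dist = softmax(w)
print(dist.sum(axis=1))   # [1. 1.]  (each row is a probability distribution)
print(dist[1])            # [0.33333333 0.33333333 0.33333333]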
fgg_create.py source code (project: fdsgeogen, author: FireDynamics)
def decompose(n, p):
    p_primes = primes(p)[::-1]
    procs = np.array([1,1,1])
    n_tmp = np.copy(n)

    for fac in p_primes:
        while (np.any(n_tmp > 0)):
            cpmax = np.argmax(n_tmp)
            cmax  = np.amax(n_tmp)
            if (cmax % fac == 0):
                n_tmp[cpmax] /= fac
                procs[cpmax] *= fac
                break
            else:
                n_tmp[cpmax] = -n_tmp[cpmax]

        if np.all(n_tmp < 0):
            print "!! decomposition does not work out... ", fac, n_tmp
            sys.exit()

        n_tmp = np.abs(n_tmp)

    print " - decomposition: resulting proc decomposition ", procs, " local mesh size ", n_tmp
    return procs[0], procs[1], procs[2]
specGAN.py source code (project: specGAN, author: OSU-slatelab)
def find_min_max(scp_file):
    minimum = float("inf")
    maximum = -float("inf")
    uid = 0
    offset = 0
    ark_dict, uid = read_mats(uid, offset, scp_file)
    while ark_dict:
        for key in ark_dict.keys():
            mat_max = np.amax(ark_dict[key])
            mat_min = np.amin(ark_dict[key])
            if mat_max > maximum:
                maximum = mat_max
            if mat_min < minimum:
                minimum = mat_min
        ark_dict, uid = read_mats(uid, offset, scp_file)
    print("min:", minimum, "max:", maximum)
    return minimum, maximum
Density.py source code (project: SecuML, author: ANSSI-FR)
def displayDataset(self, dataset):
        eps = 0.00001
        linewidth = dataset.linewidth
        if np.var(dataset.values) < eps:
            linewidth += 2
            mean = np.mean(dataset.values)
            x = np.arange(0, 1, 0.1)
            x = np.sort(np.append(x, [mean, mean-eps, mean+eps]))
            density = [1 if v == mean else 0 for v in x]
        else:
            self.kde.fit(np.asarray([[x] for x in dataset.values]))
            ## Computes the x axis
            x_max = np.amax(dataset.values)
            x_min = np.amin(dataset.values)
            delta = x_max - x_min
            density_delta = 1.1 * delta
            x = np.arange(x_min, x_max, density_delta / self.num_points)
            x_density = [[y] for y in x]
            ## kde.score_samples returns the 'log' of the density
            log_density = self.kde.score_samples(x_density).tolist()
            density = map(math.exp, log_density)
        self.ax.plot(x, density, label = dataset.label, color = dataset.color,
                linewidth = linewidth, linestyle = dataset.linestyle)
BoxPlot.py source code (project: SecuML, author: ANSSI-FR)
def display(self, output_filename):
        fig, (ax) = plt.subplots(1, 1)
        data   = [d.values for d in self.datasets]
        labels = [d.label for d in self.datasets]
        bp = ax.boxplot(data, labels = labels, notch = 0, sym = '+', vert = '1', whis = 1.5)
        plt.setp(bp['boxes'], color='black')
        plt.setp(bp['whiskers'], color='black')
        plt.setp(bp['fliers'], color='black', marker='+')
        for i in range(len(self.datasets)):
            box = bp['boxes'][i]
            box_x = []
            box_y = []
            for j in range(5):
                box_x.append(box.get_xdata()[j])
                box_y.append(box.get_ydata()[j])
            box_coords = list(zip(box_x, box_y))
            box_polygon = Polygon(box_coords, facecolor = self.datasets[i].color)
            ax.add_patch(box_polygon)
        if self.title is not None:
            ax.set_title(self.title)
        x_min = np.amin([np.amin(d.values) for d in self.datasets])
        x_max = np.amax([np.amax(d.values) for d in self.datasets])
        ax.set_ylim(x_min - 0.05*(x_max - x_min), x_max + 0.05*(x_max - x_min))
        fig.savefig(output_filename)
        plt.close(fig)
utilities.py source code (project: bgsCNN, author: SaoYan)
def build_img_pair(img_batch):
    input_cast = img_batch[:,:,:,0:6].astype(dtype = np.float32)
    input_min = np.amin(input_cast, axis=(1,2,3))
    input_max = np.amax(input_cast, axis=(1,2,3))
    for i in range(3):
        input_min = np.expand_dims(input_min, i+1)
        input_max = np.expand_dims(input_max, i+1)
    input_norm = (input_cast - input_min) / (input_max - input_min)
    gt_cast = img_batch[:,:,:,6].astype(dtype = np.float32)
    gt_cast = np.expand_dims(gt_cast, 3)
    gt_min = np.amin(gt_cast, axis=(1,2,3))
    gt_max = np.amax(gt_cast, axis=(1,2,3))
    for i in range(3):
        gt_min = np.expand_dims(gt_min, i+1)
        gt_max = np.expand_dims(gt_max, i+1)
    gt_norm = (gt_cast - gt_min) / (gt_max - gt_min)
    return input_norm, gt_norm
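
A small shape-check sketch for build_img_pair above; the random 7-channel batch is invented for illustration and assumes numpy is imported as np:

batch = np.random.rand(4, 128, 128, 7).astype(np.float32)  # hypothetical [N, H, W, 7] batch
inputs, gt = build_img_pair(batch)
print(inputs.shape)   # (4, 128, 128, 6), each sample normalized to [0, 1]
print(gt.shape)       # (4, 128, 128, 1), each sample normalized to [0, 1]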
mf_base.py source code (project: sequence-based-recommendations, author: rdevooght)
def load_last(self, save_dir):
        '''Load last model from dir
        '''
        def extract_number_of_epochs(filename):
            m = re.search('_ne([0-9]+(\.[0-9]+)?)_', filename)
            return float(m.group(1))

        # Get all the models for this RNN
        file = save_dir + self._get_model_filename("*")
        file = np.array(glob.glob(file))

        if len(file) == 0:
            print('No previous model, starting from scratch')
            return 0

        # Find last model and load it
        last_batch = np.amax(np.array(map(extract_number_of_epochs, file)))
        last_model = save_dir + self._get_model_filename(last_batch)
        print('Starting from model ' + last_model)
        self.load(last_model)

        return last_batch
ltm.py source code (project: sequence-based-recommendations, author: rdevooght)
def load_last(self, save_dir):
        '''Load last model from dir
        '''
        def extract_number_of_epochs(filename):
            m = re.search('_ne([0-9]+(\.[0-9]+)?)_', filename)
            return float(m.group(1))

        # Get all the models for this RNN
        file = save_dir + self._get_model_filename("*")
        file = np.array(glob.glob(file))

        if len(file) == 0:
            print('No previous model, starting from scratch')
            return 0

        # Find last model and load it
        last_batch = np.amax(np.array(map(extract_number_of_epochs, file)))
        last_model = save_dir + self._get_model_filename(last_batch)
        print('Starting from model ' + last_model)
        self.load(last_model)

        return last_batch
single_File_For_ColorizationModel_For_Not_OOP_Fan.py source code (project: Deep-learning-Colorization-for-visual-media, author: OmarSayedMostafa)
def Get_Batch_Chrominance():
    '''Convert every image in the batch to LAB colorspace and normalize each of its values to [0,1].

    Return:
     AbColores_values array [batch_size,224,224,2], where channel 0 holds the A color values and channel 1 the B color values
    '''
    global AbColores_values
    global ColorImages_Batch
    AbColores_values = np.empty((Batch_size,224,224,2),"float32")
    for indx in range(Batch_size):
        lab = color.rgb2lab(ColorImages_Batch[indx])
        Min_valueA = np.amin(lab[:,:,1])
        Max_valueA = np.amax(lab[:,:,1])
        Min_valueB = np.amin(lab[:,:,2])
        Max_valueB = np.amax(lab[:,:,2])
        AbColores_values[indx,:,:,0] = Normalize(lab[:,:,1],-128,127)
        AbColores_values[indx,:,:,1] = Normalize(lab[:,:,2],-128,127)
base.py source code (project: vec4ir, author: lgalke)
def _partial_fit(self, X, y=None):
        _checkXy(X, y)
        # update index
        self._inv_X = sp.vstack([self._inv_X, self._cv.transform(X)])
        # update source
        # self._fit_X = np.hstack([self._fit_X, np.asarray(X)])
        # try to infer viable doc ids
        if y is None:
            next_id = np.amax(self._y) + 1
            y = np.arange(next_id, next_id + len(X))
        else:
            y = np.asarray(y)
        self._y = np.hstack([self._y, y])

        self.n_docs += len(X)
        return self
pg_actor_critic.py source code (project: tensorflow-reinforce, author: yukezhu)
def sampleAction(self, states):
    # TODO: use this code piece when tf.multinomial gets better
    # sample action from current policy
    # actions = self.session.run(self.predicted_actions, {self.states: states})[0]
    # return actions[0]

    # temporary workaround
    def softmax(y):
      """ simple helper function here that takes unnormalized logprobs """
      maxy = np.amax(y)
      e = np.exp(y - maxy)
      return e / np.sum(e)

    # epsilon-greedy exploration strategy
    if random.random() < self.exploration:
      return random.randint(0, self.num_actions-1)
    else:
      action_scores = self.session.run(self.action_scores, {self.states: states})[0]
      action_probs  = softmax(action_scores) - 1e-5
      action = np.argmax(np.random.multinomial(1, action_probs))
      return action
pg_reinforce.py source code (project: tensorflow-reinforce, author: yukezhu)
def sampleAction(self, states):
    # TODO: use this code piece when tf.multinomial gets better
    # sample action from current policy
    # actions = self.session.run(self.predicted_actions, {self.states: states})[0]
    # return actions[0]

    # temporary workaround
    def softmax(y):
      """ simple helper function here that takes unnormalized logprobs """
      maxy = np.amax(y)
      e = np.exp(y - maxy)
      return e / np.sum(e)

    # epsilon-greedy exploration strategy
    if random.random() < self.exploration:
      return random.randint(0, self.num_actions-1)
    else:
      action_scores = self.session.run(self.action_scores, {self.states: states})[0]
      action_probs  = softmax(action_scores) - 1e-5
      action = np.argmax(np.random.multinomial(1, action_probs))
      return action
walk2vec.py source code (project: gym-kidney, author: camoy)
def pool_max(alpha):
    """
    Given a list of vectors alpha, returns the entry-wise
    max.
    """
    return np.amax(alpha, axis=1)

# A DistrFun is one of:
# - p0_min, Dirac delta distribution at vertex of min degree
# - p0_max, Dirac delta distribution at vertex of max degree
# - p0_median, Dirac delta distribution at vertex of median degree
# - p0_mean, Dirac delta distribution at vertex of mean degree

#
# Walk2VecEmbedding embeds the graph according to a modified Walk2Vec
# random walk method.
# - p0s : [DistrFun], initial distributions
# - tau : Nat, steps in the random walk
# - alpha : (0, 1], jump probability
#
train_ddqn.py source code (project: strategy, author: kanghua309)
def replay(self):
        """Memory Management and training of the agent
        """
        if len(self.memory) < self.batch_size:
            return

        state, action, reward, next_state, done = self._get_batches()
        reward += (self.gamma
                   * np.logical_not(done)
                   * np.amax(self.model.predict(next_state), axis=1))
        q_target = self.target_model.predict(state)

        _ = pd.Series(action)
        one_hot = pd.get_dummies(_).as_matrix()
        action_batch = np.where(one_hot == 1)
        q_target[action_batch] = reward
        return self.model.fit(state, q_target,
                              batch_size=self.batch_size,
                              epochs=1,
                              verbose=False)
q1_softmax.py source code (project: Tensorflow-Softmax-NER-RNNLM, author: queue-han)
def test_softmax_basic():
  """
  Some simple tests to get you started. 
  Warning: these are not exhaustive.
  """
  print "Running basic tests..."
  test1 = softmax(tf.convert_to_tensor(
      np.array([[1001,1002],[3,4]]), dtype=tf.float32))
  with tf.Session():
      test1 = test1.eval()
  assert np.amax(np.fabs(test1 - np.array(
      [0.26894142,  0.73105858]))) <= 1e-6

  test2 = softmax(tf.convert_to_tensor(
      np.array([[-1001,-1002]]), dtype=tf.float32))
  with tf.Session():
      test2 = test2.eval()
  assert np.amax(np.fabs(test2 - np.array(
      [0.73105858, 0.26894142]))) <= 1e-6

  print "Basic (non-exhaustive) softmax tests pass\n"
q1_softmax.py source code (project: Tensorflow-Softmax-NER-RNNLM, author: queue-han)
def test_cross_entropy_loss_basic():
  """
  Some simple tests to get you started.
  Warning: these are not exhaustive.
  """
  y = np.array([[0, 1], [1, 0], [1, 0]])
  yhat = np.array([[.5, .5], [.5, .5], [.5, .5]])

  test1 = cross_entropy_loss(
      tf.convert_to_tensor(y, dtype=tf.int32),
      tf.convert_to_tensor(yhat, dtype=tf.float32))
  with tf.Session():
    test1 = test1.eval()
  result = -3 * np.log(.5)
  assert np.amax(np.fabs(test1 - result)) <= 1e-6
  print "Basic (non-exhaustive) cross-entropy tests pass\n"

