Python xrange() example source code

gradient_check.py (project: selfMachineLearning, author: xhappy)
from random import randrange  # grad_check_sparse samples random indices below

def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
  """
  Sample a few random elements and only return the numerical gradient
  in these dimensions.
  """

  for i in xrange(num_checks):
    ix = tuple([randrange(m) for m in x.shape])

    oldval = x[ix]
    x[ix] = oldval + h # increment by h
    fxph = f(x) # evaluate f(x + h)
    x[ix] = oldval - h # decrement by h
    fxmh = f(x) # evaluate f(x - h)
    x[ix] = oldval # reset

    grad_numerical = (fxph - fxmh) / (2 * h)
    grad_analytic = analytic_grad[ix]
    rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))
    print('numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error))
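
A minimal usage sketch (my own, not from the project), assuming NumPy is imported as np; f is a sum of squares whose analytic gradient is known exactly:

import numpy as np

x = np.random.randn(5, 4)
f = lambda x: np.sum(x ** 2)    # f(x) = sum of squared entries
analytic_grad = 2 * x           # exact gradient of f
grad_check_sparse(f, x, analytic_grad, num_checks=5)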
k_nearest_neighbor.py (project: ML_algorithm, author: luoshao23)
def compute_distances_one_loop(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using a single loop over the test data.

    Input / Output: Same as compute_distances_two_loops
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in xrange(num_test):
      #######################################################################
      # TODO:                                                               #
      # Compute the l2 distance between the ith test point and all training #
      # points, and store the result in dists[i, :].                        #
      #######################################################################
      pass
      #######################################################################
      #                         END OF YOUR CODE                            #
      #######################################################################
    return dists
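
One way to fill in the TODO above (a sketch, not the project's official solution), assuming NumPy is imported as np:

      # inside the loop, replacing `pass`:
      dists[i, :] = np.sqrt(np.sum((self.X_train - X[i]) ** 2, axis=1))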
trees.py (project: monkeys, author: hchasestevens)
def build_tree(return_type, allowed_functions=None, convert=True, selection_strategy=None):
    if allowed_functions is not None:
        allowed_functions = frozenset(allowed_functions)
    starting_functions = find_functions(return_type, allowed_functions, convert)
    for __ in xrange(99999):
        try:
            return Node(
                random.choice(starting_functions), 
                allowed_functions=allowed_functions,
                selection_strategy=selection_strategy,
            )
        except RuntimeError:
            pass
    raise TreeConstructionError(
        "Unable to construct program, consider raising recursion depth limit."
    )
Table.py (project: LTTL, author: axanthos)
def to_numpy(self):
        """Return a numpy array with the content of a crosstab"""

        # Set numpy table type based on the crosstab's type...
        if isinstance(self, IntPivotCrosstab):
            np_type = np.dtype(np.int32)
        elif isinstance(self, PivotCrosstab):
            np_type = np.dtype(np.float32)

        # Initialize numpy table...
        np_table = np.empty([len(self.row_ids), len(self.col_ids)], np_type)
        np_table.fill(self.missing or 0)

        # Fill and return numpy table...
        for row_idx in xrange(len(self.row_ids)):
            for col_idx in xrange(len(self.col_ids)):
                try:
                    np_table[row_idx][col_idx] = self.values[
                        (self.row_ids[row_idx], self.col_ids[col_idx])
                    ]
                except KeyError:
                    pass
        return np_table

    # TODO: test.
tilegrids.py (project: lib-gatilegrid, author: geoadmin)
def iterGrid(self, minZoom, maxZoom):
        "Yields the tileBounds, zoom, tileCol and tileRow"
        assert minZoom in range(0, len(self.RESOLUTIONS))
        assert maxZoom in range(0, len(self.RESOLUTIONS))
        assert minZoom <= maxZoom

        for zoom in xrange(minZoom, maxZoom + 1):
            [minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom)
            for row in xrange(minRow, maxRow + 1):
                for col in xrange(minCol, maxCol + 1):
                    tileBounds = self.tileBounds(zoom, col, row)
                    yield (tileBounds, zoom, col, row)
tilegrids.py (project: lib-gatilegrid, author: geoadmin)
def totalNumberOfTiles(self, minZoom=None, maxZoom=None):
        "Return the total number of tiles for this instance extent"
        nbTiles = 0
        minZoom = minZoom or 0
        if maxZoom:
            maxZoom = maxZoom + 1
        else:
            maxZoom = len(self.RESOLUTIONS)
        for zoom in xrange(minZoom, maxZoom):
            nbTiles += self.numberOfTilesAtZoom(zoom)
        return nbTiles
grid.py (project: lib-gatilegrid, author: geoadmin)
def __iter__(self):
        for col in xrange(0, self.nbCellsX):
            for row in xrange(0, self.nbCellsY):
                cellExtent = self.cellExtent(col, row)
                yield (cellExtent, col, row)
encoder.py (project: Vector-Tiles-Reader-QGIS-Plugin, author: geometalab)
def _chunker(self, seq, size):
        return [seq[pos:pos + size] for pos in xrange(0, len(seq), size)]
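
A worked example of the chunking behavior (my own):

# e.g. seq = 'abcdefg', size = 3  ->  ['abc', 'def', 'g']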
vis_utils.py (project: selfMachineLearning, author: xhappy)
from math import ceil, sqrt
import numpy as np

def visualize_grid(Xs, ubound=255.0, padding=1):
  """
  Reshape a 4D tensor of image data to a grid for easy visualization.

  Inputs:
  - Xs: Data of shape (N, H, W, C)
  - ubound: Output grid will have values scaled to the range [0, ubound]
  - padding: The number of blank pixels between elements of the grid
  """
  (N, H, W, C) = Xs.shape
  grid_size = int(ceil(sqrt(N)))
  grid_height = H * grid_size + padding * (grid_size - 1)
  grid_width = W * grid_size + padding * (grid_size - 1)
  grid = np.zeros((grid_height, grid_width, C))
  next_idx = 0
  y0, y1 = 0, H
  for y in xrange(grid_size):
    x0, x1 = 0, W
    for x in xrange(grid_size):
      if next_idx < N:
        img = Xs[next_idx]
        low, high = np.min(img), np.max(img)
        grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
        # grid[y0:y1, x0:x1] = Xs[next_idx]
        next_idx += 1
      x0 += W + padding
      x1 += W + padding
    y0 += H + padding
    y1 += H + padding
  # grid_max = np.max(grid)
  # grid_min = np.min(grid)
  # grid = ubound * (grid - grid_min) / (grid_max - grid_min)
  return grid
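
A hedged usage sketch (mine): nine 8x8 RGB images are tiled into a 3x3 grid with 1-pixel padding:

Xs = np.random.rand(9, 8, 8, 3)        # nine random 8x8 RGB images
grid = visualize_grid(Xs, ubound=1.0)
# grid.shape == (26, 26, 3): three 8px tiles plus two 1px gaps per side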
k_nearest_neighbor.py (project: ML_algorithm, author: luoshao23)
def compute_distances_two_loops(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using a nested loop over both the training data and the 
    test data.

    Inputs:
    - X: A numpy array of shape (num_test, D) containing test data.

    Returns:
    - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
      is the Euclidean distance between the ith test point and the jth training
      point.
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in xrange(num_test):
      for j in xrange(num_train):
        #####################################################################
        # TODO:                                                             #
        # Compute the l2 distance between the ith test point and the jth    #
        # training point, and store the result in dists[i, j]. You should   #
        # not use a loop over dimension.                                    #
        #####################################################################
        pass
        #####################################################################
        #                       END OF YOUR CODE                            #
        #####################################################################
    return dists
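
A possible completion of the TODO (a sketch, not the official solution):

        # inside the double loop, replacing `pass`:
        dists[i, j] = np.sqrt(np.sum((X[i] - self.X_train[j]) ** 2))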
k_nearest_neighbor.py (project: ML_algorithm, author: luoshao23)
def predict_labels(self, dists, k=1):
    """
    Given a matrix of distances between test points and training points,
    predict a label for each test point.

    Inputs:
    - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
      gives the distance between the ith test point and the jth training point.

    Returns:
    - y: A numpy array of shape (num_test,) containing predicted labels for the
      test data, where y[i] is the predicted label for the test point X[i].  
    """
    num_test = dists.shape[0]
    y_pred = np.zeros(num_test)
    for i in xrange(num_test):
      # A list of length k storing the labels of the k nearest neighbors to
      # the ith test point.
      closest_y = []
      #########################################################################
      # TODO:                                                                 #
      # Use the distance matrix to find the k nearest neighbors of the ith    #
      # testing point, and use self.y_train to find the labels of these       #
      # neighbors. Store these labels in closest_y.                           #
      # Hint: Look up the function numpy.argsort.                             #
      #########################################################################
      pass
      #########################################################################
      # TODO:                                                                 #
      # Now that you have found the labels of the k nearest neighbors, you    #
      # need to find the most common label in the list closest_y of labels.   #
      # Store this label in y_pred[i]. Break ties by choosing the smaller     #
      # label.                                                                #
      #########################################################################
      pass
      #########################################################################
      #                           END OF YOUR CODE                            # 
      #########################################################################

    return y_pred
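
One common way to fill in both TODOs (a sketch assuming integer class labels; np.argmax over np.bincount breaks ties toward the smaller label):

      # inside the loop, replacing the two `pass` statements:
      closest_y = self.y_train[np.argsort(dists[i])[:k]]  # labels of the k nearest points
      y_pred[i] = np.argmax(np.bincount(closest_y))       # most common label; smaller wins ties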
softmax.py (project: ML_algorithm, author: luoshao23)
def softmax_loss_vectorized(W, X, y, reg):
  """
  Softmax loss function, vectorized version.

  Inputs and outputs are the same as softmax_loss_naive.
  """
  # Initialize the loss and gradient to zero.
  num_train = X.shape[0]
  loss = 0.0
  dW = np.zeros_like(W)

  #############################################################################
  # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
  # Store the loss in loss and the gradient in dW. If you are not careful     #
  # here, it is easy to run into numeric instability. Don't forget the        #
  # regularization!                                                           #
  #############################################################################
  scores = X.dot(W)
  scores -= np.max(scores, axis=1, keepdims=True)
  # print scores.shape
  pscores = np.exp(scores)
  pscores_norm = pscores/np.sum(pscores, axis=1, keepdims=True)
  loss = np.sum(-scores[xrange(num_train),y] + np.log(np.sum(pscores, axis=1)))

  pscores_norm[xrange(num_train),y] -= 1
  dW = X.T.dot(pscores_norm)

  loss /= num_train
  loss += 0.5*reg*np.sum(W*W)

  dW /= num_train
  dW += reg * W

  #############################################################################
  #                          END OF YOUR CODE                                 #
  #############################################################################

  return loss, dW
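
A quick sanity-check sketch (my own wiring, reusing grad_check_sparse from the first snippet on this page):

W = 0.01 * np.random.randn(10, 3)            # D=10 features, C=3 classes
X = np.random.randn(5, 10)                   # N=5 samples
y = np.random.randint(3, size=5)
loss, dW = softmax_loss_vectorized(W, X, y, reg=0.1)
f = lambda w: softmax_loss_vectorized(w, X, y, reg=0.1)[0]
grad_check_sparse(f, W, dW, num_checks=5)    # relative errors should be small (~1e-7)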
make_data_set.py (project: recnet, author: joergfranke)
def make_duration_signal(rng, length):
    duration_signal = np.zeros(length)
    for i in xrange(length[0]):
        duration_signal[i] = rng.randint(1, 9)   # scalar draw avoids assigning a size-1 array
    return duration_signal


######              Create target signal
########################################
make_data_set.py (project: recnet, author: joergfranke)
def make_target_signal(start_signal, duration_signal):
    target_signal = np.zeros([start_signal.shape[0], 2])
    counter = 0
    for i in xrange(target_signal.shape[0]):
        if start_signal[i] == 1:
            counter = duration_signal[i]
        if counter > 0:
            target_signal[i, 0] = 1
            counter -= 1
    target_signal[:,1] = 1 - target_signal[:,0]
    return target_signal


######                   Create data set
########################################
make_data_set.py (project: recnet, author: joergfranke)
def make_data_set(rng, samples):
    input_data = []
    output_data = []
    for i in xrange(samples):
        length = rng.randint(100,200,1)
        start_signal = make_start_signal(rng, length)
        duration_signal = make_duration_signal(rng, length)
        target_signal = make_target_signal(start_signal, duration_signal)
        input_data.append(np.concatenate([start_signal.reshape([length[0],1]),duration_signal.reshape([length[0],1])],axis=1))
        output_data.append(target_signal)
    return input_data, output_data
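
make_start_signal is not included in this excerpt; below is a hypothetical stand-in (my assumption, inferred from how it is consumed above) plus a usage sketch:

def make_start_signal(rng, length):                  # hypothetical: sparse 0/1 trigger signal
    start_signal = np.zeros(length[0])
    start_signal[rng.randint(0, length[0], 5)] = 1   # five random start positions
    return start_signal

rng = np.random.RandomState(42)
input_data, output_data = make_data_set(rng, samples=3)
# each input_data[i] and output_data[i] has shape (length, 2)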


######                Create klepto file
########################################
layer_master.py (project: recnet, author: joergfranke)
def rec_ortho(self, rng, ndim, ndim_factor):
        W = np.concatenate([self.sqr_ortho(rng, ndim) for i in xrange(ndim_factor)], axis=1)
        return W
parameter_supervisor.py (project: recnet, author: joergfranke)
def pass_structure_dict(self, prm_structure):

        if "net_size" in prm_structure:
            self.struct["net_size"      ] = prm_structure["net_size"]
            self.struct["hidden_layer"  ] = prm_structure["net_size"].__len__() - 2
        else:
            raise Warning("No net size")

        if "net_unit_type" in prm_structure:
            self.struct["net_unit_type"      ] = prm_structure["net_unit_type"]
            if  prm_structure["net_unit_type"].__len__() != self.struct["net_size" ].__len__():
                raise Warning("Net size and unit type have no equal length")
        else:
            raise Warning("No net unit type")

        if "net_act_type" in prm_structure:
            self.struct["net_act_type"      ] = prm_structure["net_act_type"]
            if  prm_structure["net_act_type"].__len__() != self.struct["net_size" ].__len__():
                raise Warning("Net size and act type have no equal length")
        else:
            self.struct["net_act_type" ] = ['tanh' for i in xrange(prm_structure["net_size"].__len__())]

        if "net_arch" in prm_structure:
            self.struct["net_arch"      ] = prm_structure["net_arch"]
            if  prm_structure["net_arch"].__len__() != self.struct["net_size" ].__len__():
                raise Warning("Net size and net architecture have no equal length")
        else:
            raise Warning("No network architecture 'net_arch' ")

        self.struct["weight_numb"] = 0


        if "identity_func" in prm_structure: #(currently corrupted)
            self.struct["identity_func"] = prm_structure["identity_func"]
        else:
            self.struct["identity_func"] = False


    ##### Passes parameters in optimize dictionary
    ########################################
test_builtins.py (project: packaging, author: blockstack)
def test_baddecorator(self):
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)

# def _run_unittest(*args):
#     # with check_py3k_warnings(
#     #         (".+ not supported in 3.x", DeprecationWarning),
#     #         (".+ is renamed to imp.reload", DeprecationWarning),
#     #         ("classic int division", DeprecationWarning)):
#     if True:
#         run_unittest(*args)
# 
# def test_main(verbose=None):
#     test_classes = (BuiltinTest, TestSorted)
# 
#     _run_unittest(*test_classes)
# 
#     # verify reference counting
#     if verbose and hasattr(sys, "gettotalrefcount"):
#         import gc
#         counts = [None] * 5
#         for i in xrange(len(counts)):
#             _run_unittest(*test_classes)
#             gc.collect()
#             counts[i] = sys.gettotalrefcount()
#         print(counts)
terrain.py (project: quantized-mesh-tile, author: loicgasser)
def getTrianglesCoordinates(self):
        """
        A method to retrieve triplets of coordinates representing the triangles
        in lon,lat,height.
        """
        triangles = []
        self._computeVerticesCoordinates()
        indices = iter(self.indices)
        for i in xrange(0, len(self.indices) - 1, 3):
            vi1 = next(indices)
            vi2 = next(indices)
            vi3 = next(indices)
            triangle = (
                (self._longs[vi1],
                 self._lats[vi1],
                 self._heights[vi1]),
                (self._longs[vi2],
                 self._lats[vi2],
                 self._heights[vi2]),
                (self._longs[vi3],
                 self._lats[vi3],
                 self._heights[vi3])
            )
            triangles.append(triangle)
        if len(list(indices)) > 0:
            raise Exception('Corrupted tile')
        return triangles
utils.py (project: quantized-mesh-tile, author: loicgasser)
def unpackIndices(f, indicesCount, indicesType):
    indices = []
    for i in xrange(0, indicesCount):
        indices.append(
            unpackEntry(f, indicesType)
        )
    return indices
utils.py (project: quantized-mesh-tile, author: loicgasser)
def createCoordsPairs(l):
    coordsPairs = []
    for i in xrange(0, len(l)):
        coordsPairs.append([l[i], l[(i + 2) % len(l)]])
    return coordsPairs
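
A worked example of the wrap-around pairing (my own):

# createCoordsPairs([1, 2, 3, 4]) -> [[1, 3], [2, 4], [3, 1], [4, 2]]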
rec_memory.py (project: backtrackbb, author: BackTrackBB)
def init_recursive_memory(config):
    n_bands = config.n_freq_bands
    nsamples = int(config.time_lag / config.delta)
    overlap = int(config.t_overlap / config.delta)
    # Create a dictionary of memory objects
    rec_memory = dict()
    for trid, wave in itertools.product(config.trids, config.wave_type):
        # Each entry of the dictionary is a list of memory objects
        # (with n_bands elements)
        rec_memory[(trid, wave)] =\
            [RecursiveMemory(trid=trid, wave=wave, band=n,
                             nsamples=nsamples, overlap=overlap,
                             filter_npoles=config.filter_npoles)
             for n in xrange(n_bands)]
    return rec_memory
search.py (project: monkeys, author: hchasestevens)
def build_tree_to_requirements(scoring_function, build_tree=build_tree):
    params = getattr(scoring_function, '__params', ())
    if len(params) != 1:
        raise ValueError("Scoring function must accept a single parameter.")
    return_type, = params

    for __ in xrange(9999):
        with recursion_limit(500):
            tree = build_tree(return_type, convert=False)
        requirements = getattr(scoring_function, 'required_inputs', ())
        if not all(req in tree for req in requirements):
            continue
        return tree

    raise UnsatisfiableType("Could not meet input requirements.")
search.py (project: monkeys, author: hchasestevens)
def next_generation(
        trees, scoring_fn,
        select_fn=DEFAULT_TOURNAMENT_SELECT,
        build_tree=build_tree_to_requirements, mutate=mutate,
        crossover_rate=0.80, mutation_rate=0.01,
        score_callback=None,
        optimizations=DEFAULT_OPTIMIZATIONS
    ):
    """
    Create next generation of trees from prior generation, maintaining current
    size.
    """
    selector = select_fn(trees, scoring_fn, score_callback=score_callback, optimizations=optimizations)
    pop_size = len(trees)

    new_pop = [max(trees, key=scoring_fn)]
    for __ in xrange(pop_size - 1):
        if random.random() <= crossover_rate:
            for __ in xrange(99999):
                try:
                    new_pop.append(crossover(next(selector), next(selector)))
                    break
                except (UnsatisfiableType, RuntimeError):
                    continue
            else:
                new_pop.append(build_tree(scoring_fn))

        elif random.random() <= mutation_rate / (1 - crossover_rate):
            new_pop.append(mutate(next(selector)))

        else:
            new_pop.append(next(selector))

    return new_pop
diagnostics.py (project: monkeys, author: hchasestevens)
def show_report(self, top=3):
        for exception in self.exceptions:
            print('{}:'.format(exception))
            edge_weightings = iteritems(self.edge_weightings[exception])
            for __, (edge, weight) in zip(xrange(top), edge_weightings):
                print('    {:.2f} | {}'.format(weight, edge))
random_search.py (project: pyswarms, author: ljvmiranda921)
def generate_grid(self):
        """Generates the grid of hyperparameter value combinations."""

        options = dict(self.options)
        params = {}

        # Remove 'p' to hold as a constant in the parameter combinations
        p = options.pop('p')
        params['p'] = [p for _ in xrange(self.n_selection_iters)]

        # Assign generators based on parameter type
        param_generators = {
            'c1': np.random.uniform,
            'c2': np.random.uniform,
            'w': np.random.uniform,
            'k': np.random.randint
        }

        # Generate random values for hyperparameters 'c1', 'c2', 'w', and 'k'
        for idx, bounds in options.items():
            params[idx] = param_generators[idx](
                              *bounds, size=self.n_selection_iters)

        # Return list of dicts of hyperparameter combinations
        return [{'c1': params['c1'][i],
                 'c2': params['c2'][i],
                 'w': params['w'][i],
                 'k': params['k'][i],
                 'p': params['p'][i]}
                for i in xrange(self.n_selection_iters)]
gpcharts.py (project: GooPyCharts, author: Dfenestrator)
def combineData(xdata,ydata,xlabel):
    #if ydata is a simple vector, encapsulate it into a 2D list
    if type(ydata[1]) is not list:
        ydata = [[val] for val in ydata]

    #if xdata is time data, add HH:MM:SS if it is missing (just 00:00:00)
    if type(xdata[1]) is str:
        #check if first 4 characters of xdata is a valid year
        if len(xdata[1]) == 10 and int(xdata[1][:4]) > 0 and int(xdata[1][:4]) < 3000:
            xdata[1:] = [val+' 00:00:00' for val in xdata[1:]]

    #figure out independent variable headers
    # if there is a title row, use that title
    if type(ydata[0][0]) is str:
        data = [[xdata[0]] + ydata[0]]
        for i in xrange(1,len(xdata)):
            data.append([xdata[i]]+ydata[i])
    # otherwise, use a default labeling
    else:
        header = [xlabel]
        for i in xrange(len(ydata[0])):
            header.append('data'+str(i+1))

        data = [header]
        for i in xrange(len(xdata)):
            data.append([xdata[i]]+ydata[i])

    return data
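
A worked example (mine) covering the header-row branch and the date padding:

# combineData(['date', '2020-01-01'], [['temp'], [21.5]], 'date')
# -> [['date', 'temp'], ['2020-01-01 00:00:00', 21.5]]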

#helper function, returns title as a valid JS identifier, prefixed by '_'.
linear_svm.py (project: ML_algorithm, author: luoshao23)
def svm_loss_naive(W, X, y, reg):
  """
  Structured SVM loss function, naive implementation (with loops).

  Inputs have dimension D, there are C classes, and we operate on minibatches
  of N examples.

  Inputs:
  - W: A numpy array of shape (D, C) containing weights.
  - X: A numpy array of shape (N, D) containing a minibatch of data.
  - y: A numpy array of shape (N,) containing training labels; y[i] = c means
    that X[i] has label c, where 0 <= c < C.
  - reg: (float) regularization strength

  Returns a tuple of:
  - loss as single float
  - gradient with respect to weights W; an array of same shape as W
  """
  dW = np.zeros(W.shape) # initialize the gradient as zero

  # compute the loss and the gradient
  num_classes = W.shape[1]
  num_train = X.shape[0]
  loss = 0.0
  for i in xrange(num_train):
    scores = X[i].dot(W)
    correct_class_score = scores[y[i]]
    for j in xrange(num_classes):
      if j == y[i]:
        continue
      margin = scores[j] - correct_class_score + 1 # note delta = 1
      if margin > 0:
        loss += margin
        dW[:,j] += X[i]
        dW[:,y[i]] -= X[i]

  # Right now the loss is a sum over all training examples, but we want it
  # to be an average instead so we divide by num_train.
  loss /= num_train
  dW /= num_train

  # Add regularization to the loss.
  loss += 0.5 * reg * np.sum(W * W)
  dW += reg * W
  #############################################################################
  # TODO:                                                                     #
  # Compute the gradient of the loss function and store it dW.                #
  # Rather than first computing the loss and then computing the derivative,   #
  # it may be simpler to compute the derivative at the same time that the     #
  # loss is being computed. As a result you may need to modify some of the    #
  # code above to compute the gradient.                                       #
  #############################################################################


  return loss, dW
linear_svm.py (project: ML_algorithm, author: luoshao23)
def svm_loss_vectorized(W, X, y, reg):
  """
  Structured SVM loss function, vectorized implementation.

  Inputs and outputs are the same as svm_loss_naive.
  """
  num_train = X.shape[0]
  loss = 0.0
  dW = np.zeros(W.shape) # initialize the gradient as zero

  #############################################################################
  # TODO:                                                                     #
  # Implement a vectorized version of the structured SVM loss, storing the    #
  # result in loss.                                                           #
  #############################################################################
  scores = X.dot(W)
  margin = np.maximum(0, scores + 1 - scores[xrange(num_train), y][:,np.newaxis])
  margin[xrange(num_train), y] = 0
  # hinge[hinge<0] = 0
  loss = np.sum(margin)
  loss /= num_train
  loss += 0.5*reg*np.sum(W*W)
  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################
  margin[margin>0] = 1.0
  margin[xrange(num_train), y] -= np.sum(margin, axis=1)
  dW = X.T.dot(margin)/num_train + reg*W

  #############################################################################
  # TODO:                                                                     #
  # Implement a vectorized version of the gradient for the structured SVM     #
  # loss, storing the result in dW.                                           #
  #                                                                           #
  # Hint: Instead of computing the gradient from scratch, it may be easier    #
  # to reuse some of the intermediate values that you used to compute the     #
  # loss.                                                                     #
  #############################################################################

  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################

  return loss, dW
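
A hedged consistency sketch (mine): the naive and vectorized versions above should agree on random inputs:

W = 0.01 * np.random.randn(10, 3)
X = np.random.randn(5, 10)
y = np.random.randint(3, size=5)
loss_naive, dW_naive = svm_loss_naive(W, X, y, reg=0.1)
loss_vec, dW_vec = svm_loss_vectorized(W, X, y, reg=0.1)
assert abs(loss_naive - loss_vec) < 1e-8
assert np.allclose(dW_naive, dW_vec)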

