Python copy() usage examples (real-world source code)

ModelingCloth.py (project: Modeling-Cloth, author: the3dadvantage)
import bpy
import numpy as np

def get_poly_normals(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    p_count = len(mesh.polygons)
    normal = np.zeros(p_count * 3, dtype=type)
    mesh.polygons.foreach_get('normal', normal)
    normal.shape = (p_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)

    return normal
ModelingCloth.py (project: Modeling-Cloth, author: the3dadvantage)
import bpy
import numpy as np

def get_v_normals(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    v_count = len(mesh.vertices)
    normal = np.zeros(v_count * 3, dtype=type)
    mesh.vertices.foreach_get('normal', normal)
    normal.shape = (v_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)

    return normal
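
A minimal usage sketch for the two helpers above, assuming the Blender 2.7x API they are written against (to_mesh taking a scene argument):

import bpy

ob = bpy.context.object
poly_normals = get_poly_normals(ob)  # shape (len(mesh.polygons), 3)
vert_normals = get_v_normals(ob)     # shape (len(mesh.vertices), 3)
print(poly_normals.shape, vert_normals.shape)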
ModelingCloth.py (project: Modeling-Cloth, author: the3dadvantage)
def execute(self, context):
        # method of a bpy.types.Operator subclass; `data` below is a module-level dict in ModelingCloth.py
        ob = bpy.context.object
        bpy.ops.object.mode_set(mode='OBJECT')
        sel = [i.index for i in ob.data.vertices if i.select]

        name = ob.name
        matrix = ob.matrix_world.copy()
        for v in sel:    
            e = bpy.data.objects.new('modeling_cloth_pin', None)
            bpy.context.scene.objects.link(e)
            if ob.active_shape_key is None:
                closest = matrix * ob.data.vertices[v].co
            else:
                closest = matrix * ob.active_shape_key.data[v].co
            e.location = closest
            e.show_x_ray = True
            e.select = True
            e.empty_draw_size = .1
            data[name].pin_list.append(v)
            data[name].hook_list.append(e)            
            ob.select = False
        bpy.ops.object.mode_set(mode='EDIT')       

        return {'FINISHED'}
project_v2.py (project: SelfDrivingCar, author: aguijarro)
import numpy as np

# grayscale, gaussian_blur, canny, region_of_interest, hough_lines and weighted_img
# are helper functions assumed to be defined earlier in project_v2.py
def process_image(image):
    # print some stats about the input image
    print('This image is:', type(image), 'with dimensions:', image.shape)
    gray = grayscale(image)
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # plt.imshow(blur_gray, cmap='gray')

    # Define our parameters for Canny and apply
    low_threshold = 45
    high_threshold = 150
    edges = canny(blur_gray, low_threshold, high_threshold)

    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    vertices = np.array([[(0, imshape[0]), (450, 330), (490, 310), (imshape[1], imshape[0])]], dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)

    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 1 # distance resolution in pixels of the Hough grid
    theta = np.pi/180 # angular resolution in radians of the Hough grid
    threshold = 15    # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 40  # minimum number of pixels making up a line
    max_line_gap = 130    # maximum gap in pixels between connectable line segments
    line_image = np.copy(image)*0 # creating a blank to draw lines on

    lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)
    # Draw the lines on the edge image
    lines_edges = weighted_img(lines, image)
    return lines_edges
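
A quick way to exercise the pipeline, as a sketch; the image path is hypothetical and the helper functions are assumed to be defined above in the same file:

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

image = mpimg.imread('test_images/solidWhiteRight.jpg')  # hypothetical sample path
plt.imshow(process_image(image))
plt.show()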
solver.py (project: PyFunt, author: dnlcrl)
def _reset(self):
        '''
        Set up some book-keeping variables for optimization. Don't call this
        manually.
        '''
        # Set up some variables for book-keeping
        self.epoch = 0
        self.best_val_acc = 0
        self.best_params = {}
        self.loss_history = []
        self.val_acc_history = []
        self.train_acc_history = []
        self.pbar = None

        # Give each parameter its own copy of the optim_config
        self.optim_configs = {}
        self.params, self.grad_params = self.model.get_parameters()
        # self.weights, _ = self.model.get_parameters()
        for p in range(len(self.params)):
            d = {k: v for k, v in self.optim_config.iteritems()}
            self.optim_configs[p] = d

        self.multiprocessing = bool(self.num_processes-1)  # True when num_processes != 1
        if self.multiprocessing:
            self.pool = mp.Pool(self.num_processes, init_worker)
gradient_check.py (project: PyFunt, author: dnlcrl)
import numpy as np

def eval_numerical_gradient_array(f, x, df, h=1e-5):
    '''
    Evaluate a numeric gradient for a function that accepts a numpy
    array and returns a numpy array.
    '''
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index

        oldval = x[ix]
        x[ix] = oldval + h
        pos = f(x).copy()
        x[ix] = oldval - h
        neg = f(x).copy()
        x[ix] = oldval

        grad[ix] = np.sum((pos - neg) * df) / (2 * h)
        it.iternext()
    return grad
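
A quick self-check sketch for this helper: for f(x) = x**2 the gradient of sum(f(x) * df) with respect to x is 2 * x * df, so the numeric estimate should agree up to floating-point rounding:

import numpy as np

x = np.random.randn(4, 5)
df = np.random.randn(4, 5)
num_grad = eval_numerical_gradient_array(lambda v: v ** 2, x, df)
print(np.max(np.abs(num_grad - 2 * x * df)))  # tiny, rounding error only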
test_function_base.py (project: radar, author: amoose136)
def test_order(self):
        # It turns out that people rely on np.copy() preserving order by
        # default; changing this broke scikit-learn:
        #   https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783
        a = np.array([[1, 2], [3, 4]])
        assert_(a.flags.c_contiguous)
        assert_(not a.flags.f_contiguous)
        a_fort = np.array([[1, 2], [3, 4]], order="F")
        assert_(not a_fort.flags.c_contiguous)
        assert_(a_fort.flags.f_contiguous)
        a_copy = np.copy(a)
        assert_(a_copy.flags.c_contiguous)
        assert_(not a_copy.flags.f_contiguous)
        a_fort_copy = np.copy(a_fort)
        assert_(not a_fort_copy.flags.c_contiguous)
        assert_(a_fort_copy.flags.f_contiguous)
test_function_base.py (project: radar, author: amoose136)
def test_axis_keyword(self):
        a3 = np.array([[2, 3],
                       [0, 1],
                       [6, 7],
                       [4, 5]])
        for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
            orig = a.copy()
            np.median(a, axis=None)
            for ax in range(a.ndim):
                np.median(a, axis=ax)
            assert_array_equal(a, orig)

        assert_allclose(np.median(a3, axis=0), [3,  4])
        assert_allclose(np.median(a3.T, axis=1), [3,  4])
        assert_allclose(np.median(a3), 3.5)
        assert_allclose(np.median(a3, axis=None), 3.5)
        assert_allclose(np.median(a3.T), 3.5)
function_base.py (project: radar, author: amoose136)
def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if not args:
            _res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)

            # Convert args to object arrays first
            inputs = [array(_a, copy=False, subok=True, dtype=object)
                      for _a in args]

            outputs = ufunc(*inputs)

            if ufunc.nout == 1:
                _res = array(outputs,
                             copy=False, subok=True, dtype=otypes[0])
            else:
                _res = tuple([array(_x, copy=False, subok=True, dtype=_t)
                              for _x, _t in zip(outputs, otypes)])
        return _res
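
For context, a sketch of the public API that drives this code path: np.vectorize evaluates a scalar Python function over object arrays and casts the outputs to the inferred (or requested) otypes:

import numpy as np

f = np.vectorize(lambda a, b: a - b if a > b else a + b)
print(f([1, 2, 3, 4], 2))  # -> [3 4 1 2]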
audioutils.py (project: gtzan.keras, author: Hguimaraes)
def fit(self, x, augment=False, rounds=1, seed=None):
    # class method; K is the Keras backend (from keras import backend as K)
    x = np.asarray(x, dtype=K.floatx())

    if x.ndim != 2:
      raise ValueError('Input to `.fit()` should have rank 2. '
        'Got array with shape: ' + str(x.shape))

    if seed is not None:
      np.random.seed(seed)

    x = np.copy(x)
    if augment:
      ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
      for r in range(rounds):
        for i in range(x.shape[0]):
          ax[i + r * x.shape[0]] = self.random_transform(x[i])
      x = ax

# @Class: Iterator
# @Description:
#   Abstract base class for Music data iterators.
#   n: Integer, total number of samples in the dataset to loop over.
#   batch_size: Integer, size of a batch.
#   shuffle: Boolean, whether to shuffle the data between epochs.
#   seed: Random seeding for data shuffling.
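
The comment above documents a class whose body is not part of this excerpt; a minimal, hypothetical sketch of such a constructor, following the usual Keras Iterator pattern:

import threading

class Iterator(object):
    def __init__(self, n, batch_size, shuffle, seed):
        self.n = n
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.seed = seed
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()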
libscores.py (project: AutoML5, author: djajetic)
import numpy as np
import scipy as sp

# mvmean and binarize_predictions are helper functions defined elsewhere in libscores.py
def log_loss(solution, prediction, task='binary.classification'):
    ''' Log loss for binary and multiclass. '''
    [sample_num, label_num] = solution.shape
    eps = 1e-15

    pred = np.copy(prediction) # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num>1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the multi-label case

    # Bounding of predictions to avoid log(0),1/0,...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss    
    pos_class_log_loss = - mvmean(sol*np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num==1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = - mvmean((1-sol)*np.log(1-pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss) 
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on
    else:
        # For the multiclass case the probabilities in one line add up to one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss) 
        #print('multiclass {}'.format(log_loss))
    return log_loss
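
A small worked check, assuming mvmean behaves like a column-wise np.mean on this input: for solution [[1], [0]] and prediction [[0.8], [0.1]] the binary loss is -(log 0.8 + log 0.9) / 2, about 0.164:

import numpy as np

solution = np.array([[1.0], [0.0]])
prediction = np.array([[0.8], [0.1]])
print(log_loss(solution, prediction))  # -> roughly [0.164]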
model_sentences.py (project: onto-lstm, author: pdasigi)
def test(self, vocab_size, use_onto_lstm, S_ind_test=None, C_ind_test=None, hierarchical=False, base=2, oov_list=None):
    # Python 2 code: self.model is a Keras model and self.dp a data processor defined elsewhere in model_sentences.py
    X_test = C_ind_test[:,:-1] if use_onto_lstm else S_ind_test[:,:-1] # remove the last words' hyps in all sentences
    Y_inds_test = S_ind_test[:,1:]
    if hierarchical:
      test_targets = self._factor_target_indices(Y_inds_test, vocab_size, base=base)
    else:
      test_targets = [self._make_one_hot(Y_inds_test, vocab_size)]
    print >>sys.stderr, "Evaluating model on test data"
    test_loss = self.model.evaluate(X_test, test_targets)
    print >>sys.stderr, "Test loss: %.4f"%test_loss
    if oov_list is not None:
      oov_inds = [self.dp.word_index[w] for w in oov_list]
      non_oov_Y_inds = numpy.copy(Y_inds_test)
      for ind in oov_inds:
        non_oov_Y_inds[non_oov_Y_inds == ind] = 0
      non_oov_test_targets = self._factor_target_indices(non_oov_Y_inds, vocab_size, base=base)
      non_oov_test_loss = self.model.evaluate(X_test, non_oov_test_targets)
      print >>sys.stderr, "Non-oov test loss: %.4f"%non_oov_test_loss
    factored_test_preds = [-((numpy.log(pred) * target).sum(axis=-1)) for pred, target in zip(self.model.predict(X_test), test_targets)]
    test_preds = sum(factored_test_preds)
    #non_null_probs = []
    #for test_pred, inds in zip(test_preds, Y_inds_test):
    #  wanted_probs = []
    #  for tp, ind in zip(test_pred, inds):
    #    if ind != 0:
    #      wanted_probs.append(tp)
    #  non_null_probs.append(wanted_probs)
    #return non_null_probs
    return test_preds
uwb_tracker_node.py (project: uwb_tracker_ros, author: eth-ait)
def update_filter(self, timestep, estimate, ranges):
        """Update position filter.

        Args:
             timestep (float): Time elapsed since last update.
             estimate (StateEstimate): Position estimate to update.
             ranges (list of floats): Range measurements.

        Returns:
            new_estimate (StateEstimate): Updated position estimate.
            outlier_flag (bool): Flag indicating whether the measurement was rejected as an outlier.
        """
        num_of_units = len(ranges)
        x = estimate.state
        P = estimate.covariance
        # Compute process matrix and covariance matrices
        F, Q, R = self._compute_process_and_covariance_matrices(timestep)
        # rospy.logdebug('F: {}'.format(F))
        # rospy.logdebug('Q: {}'.format(Q))
        # rospy.logdebug('R: {}'.format(R))
        # Prediction
        x = np.dot(F, x)
        P = np.dot(F, np.dot(P, F.T)) + Q
        # Update
        n = np.copy(x)
        H = np.zeros((num_of_units, x.size))
        z = np.zeros((num_of_units, 1))
        h = np.zeros((num_of_units, 1))
        for i in xrange(self.ikf_iterations):
            n, K, outlier_flag = self._ikf_iteration(x, n, ranges, h, H, z, estimate, R)
        if outlier_flag:
            new_estimate = estimate
        else:
            new_state = n
            new_covariance = np.dot((np.eye(6) - np.dot(K, H)), P)
            new_estimate = UWBTracker.StateEstimate(new_state, new_covariance)
        return new_estimate, outlier_flag
unet_d8g_222f.py (project: kaggle_dsb2017, author: astoc)
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

def augmentation(scans, masks, n):
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=25,
        width_shift_range=0.3,
        height_shift_range=0.3,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=False)
    i=0
    scans_g = scans.copy()
    # identical seed for scans and masks keeps their random transforms aligned
    for batch in datagen.flow(scans, batch_size=1, seed=1000):
        scans_g=np.vstack([scans_g,batch])
        i += 1
        if i > n:
            break
    i=0
    masks_g = masks.copy()
    for batch in datagen.flow(masks, batch_size=1, seed=1000):
        masks_g=np.vstack([masks_g,batch])
        i += 1
        if i > n:
            break
    return scans_g, masks_g
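
A usage sketch with hypothetical shapes, (N, H, W, 1) float arrays for both scans and masks; each flow loop appends n + 1 augmented singletons to the originals:

import numpy as np

scans = np.random.rand(4, 64, 64, 1).astype('float32')
masks = (np.random.rand(4, 64, 64, 1) > 0.5).astype('float32')
scans_aug, masks_aug = augmentation(scans, masks, 8)
print(scans_aug.shape, masks_aug.shape)  # -> (13, 64, 64, 1) for both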
classifier_tf.py (project: human-rl, author: gsastry)
def add_noise(self, labels, seed_str):
        # labels: boolean numpy array, modified in place; seed_str makes the noise deterministic
        if self.false_positive_rate > 0 or self.false_negative_rate > 0:
            r = np.random.RandomState()
            b = bytes(",".join(
                [seed_str, str(self.false_positive_rate),
                 str(self.false_negative_rate)]), 'ascii')
            r.seed(list(hashlib.md5(b).digest()))
            p = r.rand(len(labels))
            original_labels = np.copy(labels)
            labels[(~original_labels) & (p < self.false_positive_rate)] = True
            labels[(original_labels) & (p < self.false_negative_rate)] = False
        return labels

    ## currently unused, randomizes *noise_prob* of the labels
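
A sketch of the determinism this buys, using a hypothetical stand-in for the classifier object that carries the two rates:

import hashlib  # used inside add_noise
import numpy as np

class _Cfg(object):  # hypothetical host object
    false_positive_rate = 0.1
    false_negative_rate = 0.2

labels = np.zeros(1000, dtype=bool)
labels[:500] = True
noisy_a = add_noise(_Cfg(), labels.copy(), 'episode-1')
noisy_b = add_noise(_Cfg(), labels.copy(), 'episode-1')
assert (noisy_a == noisy_b).all()  # same seed_str, same noise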
report.py (project: cellranger, author: 10XGenomics)
def add_many(self, elems):
        self.active = True
        elems = np.copy(elems).astype(np.int_)
        # clamp out-of-range values into the overflow bucket at index max_value + 1
        elems[elems > self.max_value] = 1 + self.max_value
        self.counts += np.bincount(elems, minlength=len(self.counts))
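
A sketch with a hypothetical minimal host object; counts holds one bucket per value 0..max_value plus a final overflow bucket:

import numpy as np

class _Hist(object):  # hypothetical host object
    def __init__(self, max_value):
        self.max_value = max_value
        self.counts = np.zeros(max_value + 2, dtype=np.int_)
        self.active = False

h = _Hist(3)
add_many(h, [0, 1, 1, 3, 7, 9])
print(h.counts)  # -> [1 2 0 1 2]; 7 and 9 fall into the overflow bucket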

