Example source code for Python's zeros()

Source file: nn1_stress_test.py (project: YellowFin_Pytorch, author: JianGoForIt)
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
#     print mini_batch.shape
#     print mini_batch
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
#     print max_sent_len1, max_sent_len2
#     max_token_len = int(np.mean([len(val) for sublist in mini_batch for val in sublist]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)  # np.int was removed in NumPy 1.24
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            try:
                main_matrix1[idx1, idx2] = j  # index by position, not by the values i, j
            except IndexError:
                pass
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            try:
                main_matrix2[idx1, idx2] = j
            except IndexError:
                pass
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
#     print main_matrix1_t.size()
#     print main_matrix2_t.size()
    return [main_matrix1_t, main_matrix2_t]
#     return [Variable(torch.cat((main_matrix1_t, main_matrix2_t), 0))

# def pad_batch(mini_batch):
# #     print mini_batch
# #     print type(mini_batch)
# #     print mini_batch.shape
# #     for i, _ in enumerate(mini_batch):
# #         print i, _
#     return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
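
A minimal usage sketch with hypothetical data (assumes the pad_batch above, with numpy, torch and torch.autograd.Variable imported):

batch = [([1, 2, 3], [4, 5]), ([6], [7, 8, 9])]
m1, m2 = pad_batch(batch)
print(m1.size())  # torch.Size([2, 3]), rows padded to the longest first sentence
print(m2.size())  # torch.Size([2, 3]), rows padded to the longest second sentence
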
Source file: kshape.py (project: rca-evaluation, author: sieve-microservices)
def roll_zeropad(a, shift, axis=None):
    a = np.asanyarray(a)
    if shift == 0: return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
        res = np.concatenate((a.take(np.arange(n-shift,n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n-shift,n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res
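
A quick check of how roll_zeropad differs from np.roll (a sketch, assuming the roll_zeropad above and import numpy as np):

a = np.array([1, 2, 3, 4, 5])
print(np.roll(a, 2))        # [4 5 1 2 3], np.roll wraps values around
print(roll_zeropad(a, 2))   # [0 0 1 2 3], shifted-in positions become zeros
print(roll_zeropad(a, -2))  # [3 4 5 0 0]
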
Source file: neural_network.py (project: Modeling_Preparation, author: Yangruipis)
def _generate_data():
    """
    ?????
    ????u(k-1) ? y(k-1)?????y(k)
    """
    # u = np.random.uniform(-1,1,200)
    # y=[]
    # former_y_value = 0
    # for i in np.arange(0,200):
    #     y.append(former_y_value)
    #     next_y_value = (29.0 / 40) * np.sin(
    #         (16.0 * u[i] + 8 * former_y_value) / (3.0 + 4.0 * (u[i] ** 2) + 4 * (former_y_value ** 2))) \
    #                    + (2.0 / 10) * u[i] + (2.0 / 10) * former_y_value
    #     former_y_value = next_y_value
    # return u,y
    u1 = np.random.uniform(-np.pi,np.pi,200)
    u2 = np.random.uniform(-1,1,200)
    y = np.zeros(200)
    for i in range(200):
        value = np.sin(u1[i]) + u2[i]
        y[i] = value
    return u1, u2, y
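
A quick shape check (sketch, assuming the _generate_data above and import numpy as np):

u1, u2, y = _generate_data()
print(u1.shape, u2.shape, y.shape)  # (200,) (200,) (200,)
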
Source file: libscores.py (project: AutoML5, author: djajetic)
def bac_metric (solution, prediction, task='binary.classification'):
    ''' Compute the normalized balanced accuracy. The binarization and 
    the normalization differ for the multi-label and multi-class case. '''
    label_num = solution.shape[1]
    score = np.zeros(label_num)
    bin_prediction = binarize_predictions(prediction, task)
    [tn,fp,tp,fn] = acc_stat(solution, bin_prediction)
    # Bounding to avoid division by 0
    eps = 1e-15
    tp = sp.maximum (eps, tp)
    pos_num = sp.maximum (eps, tp+fn)
    tpr = tp / pos_num # true positive rate (sensitivity)
    if (task != 'multiclass.classification') or (label_num==1):
        tn = sp.maximum (eps, tn)
        neg_num = sp.maximum (eps, tn+fp)
        tnr = tn / neg_num # true negative rate (specificity)
        bac = 0.5*(tpr + tnr)
        base_bac = 0.5     # random predictions for binary case
    else: 
        bac = tpr
        base_bac = 1./label_num # random predictions for multiclass case
    bac = mvmean(bac)     # average over all classes
    # Normalize: 0 for random, 1 for perfect
    score = (bac - base_bac) / sp.maximum(eps, (1 - base_bac))
    return score
Source file: data_converter.py (project: AutoML5, author: djajetic)
def replace_missing(X):
    # This is ugly, but: sparse csr matrices are returned as-is, while dense
    # arrays get their NaNs replaced with a sentinel value.
    try:
        if X.getformat() == 'csr':
            return X
    except AttributeError:
        pass
    X[np.isnan(X)] = -999.0  # djajetic 05.09.2015
    return X  # djajetic 05.09.2015

    # Unreachable original imputation, kept for reference: it doubled the
    # feature count by appending an indicator of missingness per cell.
    p = len(X)
    nn = len(X[0]) * 2
    XX = np.zeros([p, nn])
    for i in range(len(X)):
        line = X[i]
        line1 = [0 if np.isnan(x) else x for x in line]
        line2 = [1 if np.isnan(x) else 0 for x in line]  # indicator of missingness
        XX[i] = line1 + line2
    return XX
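
A sketch of the dense path with hypothetical data (assumes the replace_missing above and import numpy as np):

X = np.array([[1.0, np.nan], [np.nan, 4.0]])
print(replace_missing(X))
# [[   1. -999.]
#  [-999.    4.]]
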
Source file: utils.py (project: cnn-graph-classification, author: giannisnik)
def compute_nystrom(ds_name, use_node_labels, embedding_dim, community_detection_method, kernels):
    if ds_name=="SYNTHETIC":
        graphs, labels = generate_synthetic()
    else:
        graphs, labels = load_data(ds_name, use_node_labels)
    communities, subgraphs = compute_communities(graphs, use_node_labels, community_detection_method)

    print("Number of communities: ", len(communities))
    lens = []
    for community in communities:
        lens.append(community.number_of_nodes())

    print("Average size: %.2f" % np.mean(lens))
    Q=[]
    for idx, k in enumerate(kernels):
        model = Nystrom(k, n_components=embedding_dim)
        model.fit(communities)
        Q_t = model.transform(communities)
        Q_t = np.vstack([np.zeros(embedding_dim), Q_t])
        Q.append(Q_t)

    return Q, subgraphs, labels, Q_t.shape
Source file: test_FFT.py (project: mpiFFT4py, author: spectralDNS)
def test_FFT2(FFT2):
    N = FFT2.N
    if FFT2.rank == 0:
        A = random(N).astype(FFT2.float)
    else:
        A = zeros(N, dtype=FFT2.float)

    atol, rtol = (1e-10, 1e-8) if FFT2.float is float64 else (5e-7, 1e-4)
    FFT2.comm.Bcast(A, root=0)
    a = zeros(FFT2.real_shape(), dtype=FFT2.float)
    c = zeros(FFT2.complex_shape(), dtype=FFT2.complex)
    a[:] = A[FFT2.real_local_slice()]
    c = FFT2.fft2(a, c)
    B2 = zeros(FFT2.global_complex_shape(), dtype=FFT2.complex)
    B2 = rfft2(A, B2, axes=(0,1))
    assert allclose(c, B2[FFT2.complex_local_slice()], rtol, atol)
    a = FFT2.ifft2(c, a)
    assert allclose(a, A[FFT2.real_local_slice()], rtol, atol)
Source file: loader.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def load_data(from_file, input_words, grammar, max_length):
    inputs = []
    input_lengths = []
    parses = []
    labels = []
    label_lengths = []
    with open(from_file, 'r') as data:
        for line in data:
            split = line.strip().split('\t')
            if len(split) == 4:
                _, sentence, canonical, parse = split
            else:
                _, sentence, canonical = split
                parse = None
            input, in_len = vectorize(sentence, input_words, max_length, add_eos=False)
            inputs.append(input)
            input_lengths.append(in_len)
            label, label_len = grammar.vectorize_program(canonical, max_length)
            labels.append(label)
            label_lengths.append(label_len)
            if parse is not None:
                parses.append(vectorize_constituency_parse(parse, max_length, in_len))
            else:
                parses.append(np.zeros((2*max_length-1,), dtype=bool))  # np.bool was removed in NumPy 1.24
    return inputs, input_lengths, parses, labels, label_lengths
Source file: uwb_multi_range_node.py (project: uwb_tracker_ros, author: eth-ait)
def _read_unit_offsets(self):
        if not rospy.has_param('~num_of_units'):
            rospy.logwarn("No unit offset parameters found!")
        num_of_units = rospy.get_param('~num_of_units', 0)
        self._unit_offsets = np.zeros((num_of_units, 3))
        self._unit_coefficients = np.zeros((num_of_units, 2))
        for i in range(num_of_units):  # range, for Python 3 compatibility
            unit_params = rospy.get_param('~unit_{}'.format(i))
            x = unit_params['x']
            y = unit_params['y']
            z = unit_params['z']
            self._unit_offsets[i, :] = [x, y, z]
            p0 = unit_params['p0']
            p1 = unit_params['p1']
            self._unit_coefficients[i, :] = [p0, p1]
        rospy.loginfo("Unit offsets: {}".format(self._unit_offsets))
        rospy.loginfo("Unit coefficients: {}".format(self._unit_coefficients))
Source file: facenet.py (project: facerecognition, author: guoxiaolu)
def put_images_on_grid(images, shape=(16,8)):
    nrof_images = images.shape[0]
    img_size = images.shape[1]
    bw = 3
    img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)
    for i in range(shape[1]):
        x_start = i*(img_size+bw)+bw
        for j in range(shape[0]):
            img_index = i*shape[0]+j
            if img_index>=nrof_images:
                break
            y_start = j*(img_size+bw)+bw
            img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]
        if img_index>=nrof_images:
            break
    return img
Source file: data_preprocessing_video.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def encode_and_store(batch_x, output_dir, file_name):
    """
    Args:
        1. batch_x:         Batch of 32*32 images which will go inside our autoencoder.
        2. output_dir:      Dir path for storing all encoded features for given `batch_x`.
                            Features will be stored in the form of JSON file.
        3. file_name:       File name of JSON file.
    """
    global AUTO_ENCODER
    if AUTO_ENCODER is None:
        load_AE()

    norm_batch = np.zeros(batch_x.shape)
    for i in range(len(batch_x)):
        norm_batch[i] = (batch_x[i] - np.mean(batch_x[i])) / np.std(batch_x[i])

    output_dict = {
        'name' : file_name,
        'encoded': AUTO_ENCODER.transform(norm_batch).tolist()}

    with open(output_dir+file_name+'.json', 'w') as f:
        json.dump(output_dict, f)
Source file: fields.py (project: pyfds, author: emtpb)
def material_vector(self, mat_parameter):
        """Get a vector that contains the specified material parameter for every point of the
        field.

        Args:
            mat_parameter: Material parameter of interest.

        Returns:
            Vector which contains the specified material parameter for each point in the field.
        """

        param_found = False
        mat_vector = np.zeros(self.num_points)

        for mat_reg in self.material_regions:
            for mat in mat_reg.materials:
                if hasattr(mat, mat_parameter):
                    mat_vector[mat_reg.region.indices] = getattr(mat, mat_parameter)
                    param_found = True

        if not param_found:
            wn.warn('Material parameter {} not found in set materials. Returning zeros.'
                    .format(mat_parameter), stacklevel=2)

        return mat_vector
Source file: optimize.py (project: fxnn, author: khaotik)
def compile(self,s_inputs_, s_loss_, v_params_, s_grads_=None, s_reg_=0, fetches_=None, updates_=None, givens_=None, trunc_grad_=None, profile_=False):
        def get_shared_shape(v):
            return v.get_value(borrow=True, return_internal_type=True).shape
        if type(s_inputs_) not in (list, tuple):
            s_inputs_ = [s_inputs_]
        if isinstance(updates_, dict):
            updates_= list(updates_.items())
        super(AdamSGD,self).compile(
            s_inputs_, s_loss_, v_params_, s_reg_=s_reg_, s_grads_=s_grads_, trunc_grad_=trunc_grad_)
        self.v_m = [th.shared(value=np.zeros(get_shared_shape(p), th.config.floatX),
                              name='adam_m_'+p.name if p.name is not None else None)
                    for p in v_params_]
        self.v_v = [th.shared(value=np.zeros(get_shared_shape(p), th.config.floatX),
                              name='adam_v_'+p.name if p.name is not None else None)
                    for p in v_params_]
        s_b1 = T.scalar('adam_b1'); s_b2 = T.scalar('adam_b2')
        s_b1s = T.scalar('adam_b1s'); s_b2s = T.scalar('adam_b2s')
        update_m = [(m, (m*s_b1 + (1.-s_b1)*g)) for m,g in zip(self.v_m,self.s_grads)]
        update_v = [(v, (v*s_b2 + (1.-s_b2)*g*g)) for v,g in zip(self.v_v,self.s_grads)]
        apply_grad = [(p, p-(s_b1s*m*self.s_lr)/(T.sqrt(s_b2s*v)+self.eps)) for p,m,v in zip(v_params_,self.v_m,self.v_v)]
        self.fn_train = th.function(
            inputs=[self.s_lr]+s_inputs_+[s_b1,s_b2,s_b1s,s_b2s],
            outputs=fetches_,
            updates=update_m+update_v+apply_grad+(updates_ if updates_ else []),
            on_unused_input='warn',
            givens=givens_, profile=profile_)
        self.fn_rst = th.function(inputs=[], updates=[(v, T.zeros_like(v)) for v in self.v_m+self.v_v], profile=profile_)
        return self.fn_train
Source file: cluster.py (project: rca-evaluation, author: sieve-microservices)
def silhouette_score(series, clusters):
    distances = np.zeros((series.shape[0], series.shape[0]))
    for idx_a, metric_a in enumerate(series):
        for idx_b, metric_b in enumerate(series):
            distances[idx_a, idx_b] = _sbd(metric_a, metric_b)[0]
    labels = np.zeros(series.shape[0])
    for i, (cluster, indicies) in enumerate(clusters):
        for index in indicies:
            labels[index] = i

    # the silhouette score is only defined if there are at least 2 clusters
    # with assignments
    if len(np.unique(labels)) == 1 or (len(np.unique(labels)) >= distances.shape[0]):
    #if len(np.unique(labels)) == 1:
        return labels, -1
    else:
        return labels, _silhouette_score(distances, labels, metric='precomputed')
Source file: structure.py (project: treecat, author: posterior)
def make_complete_graph(num_vertices):
    """Constructs a complete graph.

    The pairing function is: k = v1 + v2 * (v2 - 1) // 2

    Args:
        num_vertices: Number of vertices.

    Returns:
        grid: A 3 x K grid of (edge, vertex, vertex) triples, where
            K = V * (V - 1) // 2 is the number of edges.
    """
    V = num_vertices
    K = V * (V - 1) // 2
    grid = np.zeros([3, K], np.int32)
    k = 0
    for v2 in range(V):
        for v1 in range(v2):
            grid[:, k] = [k, v1, v2]
            k += 1
    return grid
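
A worked example (assumes the make_complete_graph above and import numpy as np): for num_vertices=4 there are K = 4*3//2 = 6 edges, and each column of the grid is an (edge, v1, v2) triple.

grid = make_complete_graph(4)
print(grid)
# [[0 1 2 3 4 5]
#  [0 0 1 0 1 2]
#  [1 2 2 3 3 3]]
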
Source file: structure.py (project: treecat, author: posterior)
def make_tree(edges):
    """Constructs a tree graph from a set of (vertex,vertex) pairs.

    Args:
        edges: A list or set of unordered (vertex, vertex) pairs.

    Returns:
        grid: A 3 x E grid of (edge, vertex, vertex) triples, where
            E is the number of edges.
    """
    assert all(isinstance(edge, tuple) for edge in edges)
    edges = [tuple(sorted(edge)) for edge in edges]
    edges.sort()
    E = len(edges)
    grid = np.zeros([3, E], np.int32)
    for e, (v1, v2) in enumerate(edges):
        grid[:, e] = [e, v1, v2]
    return grid
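
A worked example (assumes the make_tree above and import numpy as np): the path graph 0-1-2-3 given as unordered pairs.

grid = make_tree({(1, 0), (2, 1), (3, 2)})
print(grid)
# [[0 1 2]
#  [0 1 2]
#  [1 2 3]]
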
Source file: structure.py (project: treecat, author: posterior)
def jit_remove_edge(grid, e2k, neighbors, components, e):
    """Remove an edge from a spanning tree."""
    k = e2k[e]
    v1, v2 = grid[1:3, k]
    jit_set_remove(neighbors[v1], v2)
    jit_set_remove(neighbors[v2], v1)
    stack = np.zeros(neighbors.shape[0], np.int16)
    jit_set_add(stack, v1)
    while stack[0]:
        v1 = jit_set_pop(stack)
        components[v1] = True
        for i in range(neighbors[v1, 0]):
            v2 = neighbors[v1, i + 1]
            if not components[v2]:
                jit_set_add(stack, v2)
    return k
Source file: structure.py (project: treecat, author: posterior)
def triangular_to_square(grid, triangle):
    """Convert a packed triangular matrix to a square matrix.

    Args:
        grid: A 3 x K array as returned by make_complete_graph().
        triangle: A length-K array.

    Returns:
        A square symmetric V x V array with zero on the diagonal.
    """
    K = len(triangle)
    assert grid.shape == (3, K)
    V = int(round(0.5 + (0.25 + 2 * K)**0.5))
    assert K == V * (V - 1) // 2
    square = np.zeros([V, V], dtype=triangle.dtype)
    square[grid[1, :], grid[2, :]] = triangle
    square[grid[2, :], grid[1, :]] = triangle
    return square
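
A worked example (assumes make_complete_graph and triangular_to_square above, plus import numpy as np): packing the 3 edge weights of a triangle into a symmetric 3 x 3 matrix.

grid = make_complete_graph(3)  # K = 3 edges
print(triangular_to_square(grid, np.array([10, 20, 30])))
# [[ 0 10 20]
#  [10  0 30]
#  [20 30  0]]
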
Source file: generate.py (project: treecat, author: posterior)
def generate_model_file(num_rows, num_cols, num_cats=4, rate=1.0):
    """Generate a random model.

    Returns:
        The path to a gzipped pickled model.
    """
    path = os.path.join(DATA, '{}-{}-{}-{:0.1f}.model.pkz'.format(
        num_rows, num_cols, num_cats, rate))
    V = num_cols
    K = V * (V - 1) // 2
    if os.path.exists(path):
        return path
    print('Generating {}'.format(path))
    if not os.path.exists(DATA):
        os.makedirs(DATA)
    dataset_path = generate_dataset_file(num_rows, num_cols, num_cats, rate)
    dataset = pickle_load(dataset_path)
    table = dataset['table']
    tree_prior = np.zeros(K, dtype=np.float32)
    config = make_config(learning_init_epochs=5)
    model = train_model(table, tree_prior, config)
    pickle_dump(model, path)
    return path
Source file: util.py (project: treecat, author: posterior)
def quantize_from_probs2(probs, resolution):
    """Quantize multiple non-normalized probs to given resolution.

    Args:
        probs: An [N, M]-shaped numpy array of non-normalized probabilities.
        resolution: The integer number of quanta to distribute in each row.

    Returns:
        An [N, M]-shaped array of quantized probabilities such that
        np.all(result.sum(axis=1) == resolution).
    """
    assert len(probs.shape) == 2
    N, M = probs.shape
    probs = probs / probs.sum(axis=1, keepdims=True)
    result = np.zeros(probs.shape, np.int8)
    range_N = np.arange(N, dtype=np.int32)
    for _ in range(resolution):
        sample = probs.argmax(axis=1)
        result[range_N, sample] += 1
        probs[range_N, sample] -= 1.0 / resolution
    return result
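
A small example (assumes the quantize_from_probs2 above and import numpy as np): every row of the result sums to the requested resolution.

probs = np.array([[0.1, 0.2, 0.7], [0.4, 0.4, 0.2]])
result = quantize_from_probs2(probs, 4)
print(result.sum(axis=1))  # [4 4]
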
Source file: util.py (project: treecat, author: posterior)
def make_ragged_index(columns):
    """Make an index to hold data in a ragged array.

    Args:
        columns: A list of [N, _]-shaped numpy arrays of varying size, where
            N is the number of rows.

    Returns:
        A [len(columns)+1]-shaped array of begin,end positions of each column.
    """
    ragged_index = np.zeros([len(columns) + 1], dtype=np.int32)
    ragged_index[0] = 0
    for v, column in enumerate(columns):
        ragged_index[v + 1] = ragged_index[v] + column.shape[-1]
    ragged_index.flags.writeable = False
    return ragged_index
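
A small example (assumes the make_ragged_index above and import numpy as np): three columns of widths 2, 3 and 1 yield cumulative begin/end positions.

cols = [np.zeros((5, 2)), np.zeros((5, 3)), np.zeros((5, 1))]
print(make_ragged_index(cols))  # [0 2 5 6]
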
Source file: util.py (project: treecat, author: posterior)
def count_observations(ragged_index, data):
    """Count the observations in each cell of a ragged data array.

    Args:
        ragged_index: A [V+1]-shaped numpy array as returned by
            make_ragged_index.
        data: A [N, R]-shaped ragged array of multinomial count data, where
            N is the number of rows and R = ragged_index[-1].

    Returns:
        A [N, V]-shaped array whose entries are the number of observations
        in each cell of data.
    """
    N, R = data.shape
    assert R == ragged_index[-1]
    V = len(ragged_index) - 1
    counts = np.zeros([N, V], np.int8)
    for v in range(V):
        beg, end = ragged_index[v:v + 2]
        counts[:, v] = data[:, beg:end].sum(axis=1)
    return counts
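
A small example with hypothetical data (assumes the count_observations above and import numpy as np): two ragged cells of widths 2 and 3.

ragged_index = np.array([0, 2, 5], dtype=np.int32)
data = np.array([[1, 0, 2, 0, 1]], dtype=np.int8)
print(count_observations(ragged_index, data))  # [[1 3]]
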
Source file: rhd.py (project: spyking-circus, author: spyking-circus)
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):

        t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
        local_shape     = t_stop - t_start

        local_chunk = numpy.zeros((self.nb_channels, local_shape), dtype=self.data_dtype)
        data_slice  = self._get_slice_(t_start, t_stop) 

        self._open()
        count = 0

        for s in data_slice:
            t_slice = len(s)//self.nb_channels
            local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels, len(s)//self.nb_channels)
            count += t_slice

        local_chunk = local_chunk.T
        self._close()

        if nodes is not None:
            if not numpy.all(nodes == numpy.arange(self.nb_channels)):
                local_chunk = numpy.take(local_chunk, nodes, axis=1)

        return self._scale_data_to_float32(local_chunk)
Source file: openephys.py (project: spyking-circus, author: spyking-circus)
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):

        t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
        local_shape     = t_stop - t_start

        if nodes is None:
            nodes = numpy.arange(self.nb_channels)

        local_chunk = numpy.zeros((local_shape, len(nodes)), dtype=self.data_dtype)
        data_slice  = self._get_slice_(t_start, t_stop) 

        self._open()
        for count, i in enumerate(nodes):
            local_chunk[:, count] = self.data[i][data_slice]
        self._close()

        return self._scale_data_to_float32(local_chunk)
Source file: neuralynx.py (project: spyking-circus, author: spyking-circus)
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):

        t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
        local_shape     = t_stop - t_start

        if nodes is None:
            nodes = numpy.arange(self.nb_channels)

        local_chunk = numpy.zeros((local_shape, len(nodes)), dtype=self.data_dtype)
        data_slice  = self._get_slice_(t_start, t_stop) 

        self._open()
        for count, i in enumerate(nodes):
            local_chunk[:, count] = self.data[i][data_slice]
        self._close()

        return self._scale_data_to_float32(local_chunk)
Source file: generate_missing_data.py (project: inductive-pooling, author: HUJI-Deep)
def corrupt_image(img, MAR_prob=0, min_rects=0, max_rects=0, min_width=0, max_width=0):
    new_img = img.copy()
    mask = np.zeros(img.shape[0:2], dtype=bool)  # np.bool was removed in NumPy 1.24
    if MAR_prob > 0:
        mask[(random_sample(mask.shape) < MAR_prob)] = True
    if max_rects > 0 and max_width > 0:
        h, w = mask.shape
        num_rects = random_integers(min_rects, max_rects)
        for i in range(num_rects):
            px1 = random_integers(0, w - min(max(min_width, 1), w))
            py1 = random_integers(0, h - min(max(min_width, 1), h))
            px2 = px1 + (min_width - 1) + random_integers(0, max(min(w - px1 - min_width, max_width - min_width), 0))
            py2 = py1 + (min_width - 1) + random_integers(0, max(min(h - py1 - min_width, max_width - min_width), 0))
            if px1 <= px2 and py1 <= py2:
                mask[py1:py2, px1:px2] = True
            else:
                # One of the sides has length 0, so no pixels are masked
                pass
    if len(new_img.shape) == 2:
        new_img[mask] = 0
    else:
        new_img[mask,:] = 0
    return (new_img, 1.0 * mask)

# Process command line inputs
Source file: pascal_voc_loader.py (project: pytorch-semseg, author: meetshah1995)
def decode_segmap(self, temp, plot=False):
        label_colours = self.get_pascal_labels()
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = label_colours[l, 0]
            g[temp == l] = label_colours[l, 1]
            b[temp == l] = label_colours[l, 2]

        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            return rgb
Source file: sudoku_steps.py (project: pyku, author: dubvulture)
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos // 9  # integer division (the Python 2 original used /)
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
Source file: sudoku.py (project: pyku, author: dubvulture)
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos // 9  # integer division (the Python 2 original used /)
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
Source file: genderclassifier.py (project: namegenderclassifier, author: joaoalvarenga)
def __parse_name(self, name):
    x = np.zeros(len(self.__trigrams_indexes)+len(self.__char_indexes))
    trigram = name[len(name)-3:]
    if trigram in self.__trigrams_indexes:
        x[self.__trigrams_indexes[trigram]] = 1  # was '==', a no-op comparison
    for c in set(name):
        if c in self.__char_indexes:
            x[len(self.__trigrams_indexes)+self.__char_indexes[c]] = name.count(c)/float(len(name))
    return x

