Example source code for Python's set_trace()
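
ipdb.set_trace() suspends execution at the exact call site and drops into an interactive IPython-flavored debugger. That is why it appears throughout real projects as an ad-hoc breakpoint: guarded by a debug flag, triggered on a NaN, or placed inside an exception handler, as the snippets below show. A minimal sketch of the basic pattern (the safe_divide function is hypothetical, for illustration only):

import ipdb

def safe_divide(a, b):
    # Execution pauses here; inspect a and b interactively,
    # then continue with 'c' or step with 'n'.
    ipdb.set_trace()
    return a / b

print(safe_divide(6, 2))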

tests.py (project: django-corenlp, author: arunchaganty)
def test_annotate_filter(self):
        """
        Filter out mentions of type 'O'.
        The remaining mentions should correctly identify:
            - character offsets
            - glosses
            - links to canonical_mentions
            - links to parent_mentions
        """
        sentences, mentions = annotate_document(self._doc, self._client,
                                                mention_filter=lambda mentions:[m for m in mentions if m.type != 'O'])

        ipdb.set_trace()

        # Just assert counts.
        self.assertEqual(3, len(sentences))
        self.assertEqual(19, len(mentions))

        for m in mentions:
            self.assertTrue(m.ner != 'O')
SENN.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def loss(self, inf_targets, inf_vads, targets, vads, mtl_fac):
        '''
        Loss definition
        Only the speech inference loss is defined; it works quite well.
        Add a VAD cross-entropy loss if you want.
        '''
        loss_v1 = tf.nn.l2_loss(inf_targets - targets) / self.batch_size
        loss_o = loss_v1
        reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # ipdb.set_trace()
        loss_v = loss_o + tf.add_n(reg_loss)
        tf.scalar_summary('loss', loss_v)
        # loss_merge = tf.cond(
        #     is_val, lambda: tf.scalar_summary('val_loss_batch', loss_v),
        #     lambda: tf.scalar_summary('loss', loss_v))
        return loss_v, loss_o
        # return tf.reduce_mean(tf.nn.l2_loss(inf_targets - targets))
data_set_gen.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def transform(audio_data, save_image_path, nFFT=256, overlap=0.75):
    '''audio_data: signals to convert
    save_image_path: path to store the image file'''
    # spectrogram
    freq_data = stft(audio_data, nFFT, overlap)
    freq_data = np.maximum(np.abs(freq_data),
                           np.max(np.abs(freq_data)) / 10000)
    log_freq_data = 20. * np.log10(freq_data / 1e-4)
    N_samples = log_freq_data.shape[0]
    # log_freq_data = np.maximum(log_freq_data, max_m - 70)
    # print(np.max(np.max(log_freq_data)))
    # print(np.min(np.min(log_freq_data)))
    log_freq_data = np.round(log_freq_data)
    log_freq_data = np.transpose(log_freq_data)
    # ipdb.set_trace()

    assert np.max(np.max(log_freq_data)) < 256, 'spectrogram value too large'
    # save the image
    spec_imag = Image.fromarray(log_freq_data)
    spec_imag = spec_imag.convert('RGB')
    spec_imag.save(save_image_path)
    return N_samples
tweets_analyzer.py (project: tweets_analyzer, author: x0rz)
def print_stats(dataset, top=5):
    """ Displays top values by order """
    sum = numpy.sum(list(dataset.values()))
    i = 0
    if sum:
        sorted_keys = sorted(dataset, key=dataset.get, reverse=True)
        max_len_key = max([len(x) for x in sorted_keys][:top])  # used to adjust column width
        for k in sorted_keys:
            try:
                print(("- \033[1m{:<%d}\033[0m {:>6} {:<4}" % max_len_key)
                      .format(k, dataset[k], "(%d%%)" % ((float(dataset[k]) / sum) * 100)))
            except:
                import ipdb
                ipdb.set_trace()
            i += 1
            if i >= top:
                break
    else:
        print("No data")
    print("")
network.py (project: Automatic_Group_Photography_Enhancement, author: Yuliang-Zou)
def extract(self, data_path, session, saver):
        saver.restore(session, data_path)
        scopes = ['conv1_1','conv1_2','conv2_1','conv2_2','conv3_1','conv3_2','conv3_3','conv4_1','conv4_2','conv4_3','conv5_1','conv5_2','conv5_3','rpn_conv/3x3','rpn_cls_score','rpn_bbox_pred','fc6','fc7','cls_score','bbox_pred']
        data_dict = {}
        for scope in scopes:
            # Frozen layers
            if scope in ['conv1_1','conv1_2','conv2_1','conv2_2']:
                [w, b] = tf.get_collection(tf.GraphKeys.VARIABLES, scope=scope)
            # We don't need momentum variables
            else:
                [w, b] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
            data_dict[scope] = {'weights':w.eval(), 'biases':b.eval()}
        file_name = data_path[0:-5]
        np.save(file_name, data_dict)
        ipdb.set_trace()       
        return file_name + '.npy'
cross_entropy.py (project: chainer-deconv, author: germanRos)
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        log_y = cupy.log(x + 1e-5)
        self.y = x

        if self.debug:
            ipdb.set_trace()

        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? 0 : log_y[_j * n_channel + t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,
weighted_cross_entropy.py (project: chainer-deconv, author: germanRos)
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        log_y = cupy.log(x + 1e-5)
        self.y = x

        if self.debug:
            ipdb.set_trace()

        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff, raw T weights', 'T out',
            't == -1 ? 0 : log_y[_j * n_channel + t] * weights[t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff, self.weights.reduced_view())
        return ret,
lm_base.py (project: LM_GANS, author: anirudh9119)
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
    probs = []

    n_done = 0

    for x in iterator:
        n_done += len(x)

        x, x_mask = prepare_data(x, n_words=options['n_words'])

        pprobs = f_log_probs(x, x_mask)
        for pp in pprobs:
            probs.append(pp)

        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()

        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)

    return numpy.array(probs)
primitive_defaults.py (project: proteusisc, author: diamondman)
def __init__(self, insname, *args, execute=True, read_status=False,
                 loop=0, delay=0, **kwargs):
        super(RunInstruction, self).__init__(*args, **kwargs)
        self.isBH = False
        if (self.data or self.read) and not self.bitcount:
            desc = self.dev._desc
            regname = desc._instruction_register_map.get(insname)
            self.bitcount = desc._registers.get(regname)
            if self.bitcount is None:
                #print("Dealing with a Blackhole Register")
                self.isBH = True
                self.bitcount = len(self.data)
        if not self.data and self.bitcount:
            self.data = NoCareBitarray(self.bitcount)
        if self.data is not None and len(self.data) != self.bitcount:
            import ipdb
            ipdb.set_trace()
            raise ValueError("")
        self.read_status = read_status
        self.insname = insname
        self.execute = execute
        self.delay = delay
        self.loop = loop
views.py (project: fileserver-chat, author: tanmaydatta)
def index():
    global curr_time
    global syspass
    if request.method == 'GET':
        return render_template('index.html')
    elif request.method == 'POST':
        # ipdb.set_trace()
        enroll = request.form['enroll']
        passwd = request.form['pass']
        syspass = request.form['syspass']
        name = request.form['name']
        session['name'] = name
        a = soldier.run('sudo mount -t cifs //fileserver2/' + enroll + ' /mnt -o user='+enroll+',password='+passwd+',workgroup=workgroup,ip=172.16.68.30', sudo=syspass)
        # a = soldier.run()
        if os.path.isfile('/mnt/chat.txt') == False:
            a = soldier.run('sudo touch /mnt/chat.txt', sudo=syspass)
        curr_time = time.time()
        session['user']=1
        # print session['curr']
        return redirect('/chat')
rmn.py (project: rmn, author: orhanf)
def pred_probs(self, stream, f_log_probs, prepare_data, verbose=True):

        options = self.options
        probs = []
        n_done = 0

        for x in stream:
            n_done += len(x)

            x, x_mask = prepare_data(x, n_words=options['n_words'])

            pprobs = f_log_probs(x, x_mask)
            for pp in pprobs:
                probs.append(pp)

            if numpy.isnan(numpy.mean(probs)):
                ipdb.set_trace()

            if verbose:
                print >>sys.stderr, '%d samples computed' % (n_done)

        return numpy.array(probs)
datagenerator.py (project: deep-clustering, author: zhr1201)
def __init__(self, data_dir, batch_size):
        '''preprocess the training data
        data_dir: dir containing the training data
                  format: root_dir + speaker_dir + wavfiles'''
        # get dirs for each speaker
        self.speakers_dir = [os.path.join(data_dir, i)
                             for i in os.listdir(data_dir)]
        self.n_speaker = len(self.speakers_dir)
        self.batch_size = batch_size
        self.speaker_file = {}
        self.epoch = 0

        # get the files in each speakers dir
        for i in range(self.n_speaker):
            wav_dir_i = [os.path.join(self.speakers_dir[i], file)
                         for file in os.listdir(self.speakers_dir[i])]
            for j in wav_dir_i:
                if i not in self.speaker_file:
                    self.speaker_file[i] = []
                self.speaker_file[i].append(j)
        # ipdb.set_trace()
        # self.reinit()
inspect_weight_dist.py (project: additions_mxnet, author: eldercrow)
def inspect_weight_dist(prefix_net, epoch):
    #
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix_net, epoch)

    quantize_bit = 5

    err_log = {}
    err_uni = {}

    err_diff = []

    for k in sorted(arg_params):
        if not k.endswith('_weight'):
            continue
        v = arg_params[k].asnumpy().ravel()

        err_log[k] = measure_log_quantize_error(v, quantize_bit)
        err_uni[k] = measure_uni_quantize_error(v, quantize_bit)

        err_diff.append(err_log[k] - err_uni[k])

    plt.plot(range(len(err_diff)), err_diff)

    import ipdb
    ipdb.set_trace()
demo_pvanet.py (project: additions_mxnet, author: eldercrow)
def generate_batch(im):
    """
    preprocess image, return batch
    :param im: cv2.imread returns [height, width, channel] in BGR
    :return:
    data_batch: MXNet input batch
    data_names: names in data_batch
    im_scale: float number
    """
    import ipdb
    ipdb.set_trace()
    im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE, stride=config.IMAGE_STRIDE)
    im_array = transform(im_array, PIXEL_MEANS)
    im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)
    data = [mx.nd.array(im_array), mx.nd.array(im_info)]
    data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]
    data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
    return data_batch, DATA_NAMES, im_scale
prepkit.py (project: nature_methods_multicut_pipeline, author: ilastik)
def __call__(self, inp=None):
        """Apply preptrain to input `inp`."""
        # Parse
        inp = (self.x if inp is None else inp)
        # Instantiate an interloop container
        itc = inp

        # Loop
        for coach in self.train:
            try:
                itc = coach(itc)
            except Exception as e:
                if self._debug:
                    print("Exception raised, entering debugger. Hit 'q' followed by 'return' to exit.")
                    import ipdb
                    ipdb.set_trace()
                else:
                    raise e

        # Assign and return
        self.y = itc
        return self.y
lm.py (project: rnn_benchmarks, author: caglar)
def pred_probs(f_log_probs, options, iterator, verbose=True):
    probs = []
    n_done = 0
    for x, y in iterator:
        n_done += len(x)

        pprobs = f_log_probs(x)
        for pp in pprobs:
            probs.append(pp)

        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()

        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)

    return numpy.array(probs)
pytorch_utils.py (project: mcnPyTorch, author: albanie)
def forward(self, x):
        # mini = list(self.features.children())[:4]
        # mini_f = torch.nn.modules.Sequential(*mini) ;
        # y = mini_f(x)
        # ipdb.set_trace()
        # mini = list(self.features.children())

        x = self.features(x)
        if self.flatten_loc == 'classifier':
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
        elif self.flatten_loc == 'end':
            x = self.classifier(x)
            x = x.view(x.size(0), -1)
        else:
            msg = 'unrecognised flatten_loc: {}'.format(self.flatten_loc)
            raise ValueError(msg)
        return x
util.py (project: tf_fcn, author: Yuliang-Zou)
def get_original_size(mask, max_size=(640,640)):
    row = None
    col = None
    for i in range(max_size[0]-1, -1, -1):
        if mask[i,0,0] == 1:
            row = i + 1
            break

    for i in range(max_size[1]-1, -1, -1):
        if mask[0,i,0] == 1:
            col = i + 1
            break

    if row is None or col is None:
        ipdb.set_trace()
    return row, col
tweets_analyzer.py (project: hackerbot, author: omergunal)
def print_stats(dataset, top=5):
    """ Displays top values by order """
    sum = numpy.sum(list(dataset.values()))
    i = 0
    if sum:
        sorted_keys = sorted(dataset, key=dataset.get, reverse=True)
        max_len_key = max([len(x) for x in sorted_keys][:top])  # used to adjust column width
        for k in sorted_keys:
            try:
                print(("- \033[1m{:<%d}\033[0m {:>6} {:<4}" % max_len_key)
                      .format(k, dataset[k], "(%d%%)" % ((float(dataset[k]) / sum) * 100)))
            except:
                import ipdb
                ipdb.set_trace()
            i += 1
            if i >= top:
                break
    else:
        print("No data")
    print("")
extensions.py (project: dl4mt-multi, author: nyu-dl)
def do(self, callback_name, *args):
        probs = {}
        print ''
        logger.info(" Computing log-probs...")
        start = time.time()
        for cg_name, stream in self.streams.iteritems():
            probs[cg_name] = list()
            src_id, trg_id = p_(cg_name)

            # handle multi-source stream
            src_idx = self.enc_ids.index(src_id)
            trg_idx = self.dec_ids.index(trg_id)

            for i, batch in enumerate(stream.get_epoch_iterator()):
                batch_size = batch[0].shape[0]
                src_sel = numpy.zeros(
                    (batch_size, self.num_encs)).astype(theano.config.floatX)
                src_sel[:, src_idx] = 1.
                trg_sel = numpy.zeros(
                    (batch_size, self.num_decs)).astype(theano.config.floatX)
                trg_sel[:, trg_idx] = 1.

                inps = [batch[0].T, batch[1].T, batch[2].T, batch[3].T,
                        src_sel, trg_sel]

                pprobs = self.f_log_probs[cg_name](*inps)
                probs[cg_name].append(pprobs.tolist())

                if numpy.isnan(numpy.mean(probs[cg_name])):
                    import ipdb
                    ipdb.set_trace()

            print 'logprob for CG [{}]: {}'.format(
                cg_name, numpy.mean(probs[cg_name]))

        print "took {} seconds.".format(time.time()-start)
        records = [('logprob_' + k, numpy.mean(v))
                   for k, v in probs.iteritems()]
        self.add_records(self.main_loop.log, records)
ln_lstm2.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):
            c, h = state

            # change bias argument to False since LN will add bias via shift
            concat = tf.nn.rnn_cell._linear(
                [inputs, h], 4 * self._num_units, False)
            # ipdb.set_trace()

            i, j, f, o = tf.split(1, 4, concat)

            # add layer normalization to each gate
            i = ln(i, scope='i/')
            j = ln(j, scope='j/')
            f = ln(f, scope='f/')
            o = ln(o, scope='o/')

            new_c = (c * tf.nn.sigmoid(f + self._forget_bias) +
                     tf.nn.sigmoid(i) * self._activation(j))

            # add layer_normalization in calculation of new hidden state
            new_h = self._activation(
                ln(new_c, scope='new_h/')) * tf.nn.sigmoid(o)
            new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)
            return new_h, new_state
SENN.py (project: CNN-for-single-channel-speech-enhancement, author: zhr1201)
def inference(self, images, is_train):
        '''Net configuration as in the original paper'''
        image_input = tf.reshape(images, [-1, self.N_IN, self.NEFF, 1])
        # ipdb.set_trace()
        with tf.variable_scope('con1') as scope:
            h_conv1 = self._conv_layer_wrapper(image_input, 12, 13, is_train)
        with tf.variable_scope('con2') as scope:
            h_conv2 = self._conv_layer_wrapper(h_conv1, 16, 11, is_train)
        with tf.variable_scope('con3') as scope:
            h_conv3 = self._conv_layer_wrapper(h_conv2, 20, 9, is_train)
        with tf.variable_scope('con4') as scope:
            h_conv4 = self._conv_layer_wrapper(h_conv3, 24, 7, is_train)
        with tf.variable_scope('con5') as scope:
            h_conv5 = self._conv_layer_wrapper(h_conv4, 32, 7, is_train)
        with tf.variable_scope('con6') as scope:
            h_conv6 = self._conv_layer_wrapper(h_conv5, 24, 7, is_train)
        with tf.variable_scope('con7') as scope:
            h_conv7 = self._conv_layer_wrapper(h_conv6, 20, 9, is_train)
        with tf.variable_scope('con8') as scope:
            h_conv8 = self._conv_layer_wrapper(h_conv7, 16, 11, is_train)
        with tf.variable_scope('con9') as scope:
            h_conv9 = self._conv_layer_wrapper(h_conv8, 12, 13, is_train)
        with tf.variable_scope('con10') as scope:
            f_w = h_conv9.get_shape()[1].value
            i_fm = h_conv9.get_shape()[-1].value
            W_con10 = weight_variable(
                [f_w, 129, i_fm, 1])
            b_conv10 = bias_variable([1])
            h_conv10 = conv2d(h_conv9, W_con10) + b_conv10
        return tf.reshape(h_conv10, [-1, self.NEFF])
audio_reader.py (project: CNN-for-single-channel-speech-enhancement, author: zhr1201)
def __init__(self,
                 audio_dir,
                 noise_dir,
                 coord,
                 N_IN,
                 frame_length,
                 frame_move,
                 is_val):
        '''coord: tensorflow coordinator
        N_IN: number of input frames presented to DNN
        frame_move: hopsize'''
        self.audio_dir = audio_dir
        self.noise_dir = noise_dir
        self.coord = coord
        self.N_IN = N_IN
        self.frame_length = frame_length
        self.frame_move = frame_move
        self.is_val = is_val
        self.sample_placeholder_many = tf.placeholder(
            tf.float32, shape=(None, self.N_IN, 2, frame_length))
        # queues to store the data
        if not is_val:
            self.q = tf.RandomShuffleQueue(
                200000, 5000, tf.float32, shapes=(self.N_IN, 2, frame_length))
        else:
            self.q = tf.FIFOQueue(
                200000, tf.float32, shapes=(self.N_IN, 2, frame_length))
        self.enqueue_many = self.q.enqueue_many(
            self.sample_placeholder_many + 0)
        self.audiofiles = find_files(audio_dir)
        self.noisefiles = find_files(noise_dir)
        print('%d speech found' % len(self.audiofiles))
        print('%d noise found' % len(self.noisefiles))
        # ipdb.set_trace()
main.py (project: factoriommo-agent, author: factoriommo)
def main_loop(self):
        logger.debug("In main loop")
        while True:
            sleeptime = 0.1
            if self.options.debug:
                import ipdb
                ipdb.set_trace()

            try:
                logdata = self.log.q.get(False)
                self.parse_logdata(logdata)
                sleeptime = 0.1
            except Empty:
                sleeptime = 0.5
            except:
                logger.exception("Something went wrong handling some log data")

            try:
                chatdata = self.log.chat.get(False)
                self.parse_chatdata(chatdata)
                sleeptime = 0.1
            except Empty:
                if sleeptime != 0.1:
                    sleeptime = 0.5
            except:
                logger.exception("Something went wrong handling some chat data")

            try:
                wsdata = self.ws.from_server.get(False)
                self.parse_wsdata(wsdata)
                sleeptime = 0.1
            except Empty:
                if sleeptime != 0.1:
                    sleeptime = 0.5
            except:
                logger.exception("Something went wrong handling some ws data")

            sleep(sleeptime)
nmt.py (project: nematus, author: hlt-mt)
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True, normalize=False, alignweights=False):
    probs = []
    n_done = 0

    alignments_json = []

    for x, y in iterator:
        n_done += len(x)

        x, x_mask, y, y_mask = prepare_data(x, y,
                                            n_words_src=options['n_words_src'],
                                            n_words=options['n_words'])

        ### in optional save weights mode.
        if alignweights:
            pprobs, attention = f_log_probs(x, x_mask, y, y_mask)
            for jdata in get_alignments(attention, x_mask, y_mask):
                alignments_json.append(jdata)
        else:
            pprobs = f_log_probs(x, x_mask, y, y_mask)

        # normalize scores according to output length
        if normalize:
            lengths = numpy.array([numpy.count_nonzero(s) for s in y_mask.T])
            pprobs /= lengths

        for pp in pprobs:
            probs.append(pp)

        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()

        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)

    return numpy.array(probs), alignments_json


# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
dlink_ftp.dlink.eu_filelist2.py (project: DLink_Harvester, author: MikimotoH)
def parse_date(txt):
    if not txt:
        return None
    try:
        return datetime.strptime(txt, '%Y-%m-%d %H:%M:%S')
    except ValueError:
        ipdb.set_trace()
dlink_ftp.dlink.eu_filelist2.py (project: DLink_Harvester, author: MikimotoH)
def guess_date(ftp_url):
    import re
    m = re.search(r'_\d{6,8}', ftp_url.split('/')[-1])
    if not m:
        return None
    m = m.group(0).strip('_')
    if len(m)==6:
        return datetime.strptime(m,'%y%m%d')
    elif len(m)==8:
        return datetime.strptime(m,'%Y%m%d')
    else:
        ipdb.set_trace()
solr_test.py (project: solr_presentation, author: avolkov)
def search_phrase(text):
    print()
    print()
    print("Searching for: '%s'" % text)
    res = solr.search(text)
    print("Search results object ", res)
    print("The number of results: %d " % len(res.docs))
    print("The best result ", res.docs[0])
    import ipdb
    ipdb.set_trace()

# Exact phrase
util.py (project: nicfit.py, author: nicfit)
def debugger():
    """If called in the context of an exception, calls post_mortem; otherwise
    set_trace.
    ``ipdb`` is preferred over ``pdb`` if installed.
    """
    e, m, tb = sys.exc_info()
    if tb is not None:
        _debugger.post_mortem(tb)
    else:
        _debugger.set_trace()
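
A usage sketch for this helper, assuming _debugger is bound to ipdb (falling back to pdb) at import time; the failing function is hypothetical:

def failing():
    raise RuntimeError("boom")

try:
    failing()
except RuntimeError:
    # sys.exc_info() returns a non-None traceback here, so
    # debugger() post-mortems the failing frame.
    debugger()

# Called outside of an exception context, debugger() falls through to
# set_trace() and breaks at the call site instead.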
vrptw.py (project: vrptw-pgss-2016, author: conwayje)
def plot_for_truck(k):
  global depot, customers
  n = len(customers)
  x = [element.x for element in customers[1:]]
  y = [element.y for element in customers[1:]]
  plt.scatter(x, y)
  plt.scatter(depot.x, depot.y, c="r")


  truck = trucks[k]
  x2 = []
  y2 = []

  x2.append( depot.x )
  y2.append( depot.y )
  cs = truck.ordered_customers
  for c in cs:
    x2.append( c.x )
    y2.append( c.y )

  x2.append( depot.x )
  y2.append( depot.y )

  colors = ["b","g","r","c","m","k"]

  ipdb.set_trace()

  plt.plot(x2, y2, c=colors[k], linewidth=3)

  # plt.show()

