Python default_timer() example source code

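timeit.default_timer is the timeit module's default clock. Since Python 3.3 it is an alias for time.perf_counter, a monotonic, high-resolution timer; on older versions it selected time.clock on Windows and time.time elsewhere. Every example below uses the same pattern: take one reading before the work, another after, and report the difference. A minimal sketch of that pattern (the measured expression is only a placeholder):

import timeit

start = timeit.default_timer()
total = sum(x * x for x in range(10 ** 6))  # any code under measurement
elapsed = timeit.default_timer() - start
print("elapsed: {:.4f} s".format(elapsed))
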
call.py (project: run_lambda, author: ethantkoenig)
def build(self):
            end_time = timeit.default_timer()
            end_mem = memory_profiler.memory_usage()[0]

            sys.stdout = self._previous_stdout

            self._log.write("END RequestId: {r}\n".format(
                r=self._context.aws_request_id))

            duration_in_millis = int(math.ceil(1000 * (end_time - self._start_time)))
            # The memory overhead of setting up the AWS Lambda environment
            # (when actually run in AWS) is roughly 14 MB
            max_memory_used_in_mb = (end_mem - self._start_mem) / 1048576 + 14

            self._log.write(
                "REPORT RequestId: {r}\tDuration: {d} ms\t"
                "Max Memory Used: {m} MB\n"
                .format(r=self._context.aws_request_id,
                        d=duration_in_millis,
                        m=max_memory_used_in_mb))

            log = self._log.getvalue()
            return LambdaCallSummary(duration_in_millis, max_memory_used_in_mb, log)
games.py (project: Harmonbot, author: Harmon758)
def reaction_time(self, ctx):
        '''Reaction time game'''
        response, embed = await self.bot.say("Please choose 10 reactions")
        while len(response.reactions) < 10:
            await self.bot.wait_for_reaction(message = response)
            response = await self.bot.get_message(ctx.message.channel, response.id)
        reactions = response.reactions
        reaction = random.choice(reactions)
        await self.bot.edit_message(response, "Please wait..")
        for _reaction in reactions:
            try:
                await self.bot.add_reaction(response, _reaction.emoji)
            except discord.errors.HTTPException:
                await self.bot.edit_message(response, ":no_entry: Error: Please don't deselect your reactions before I've selected them")
                return
        for countdown in range(10, 0, -1):
            await self.bot.edit_message(response, "First to select the reaction _ wins.\nMake sure to have all the reactions deselected.\nGet ready! {}".format(countdown))
            await asyncio.sleep(1)
        await self.bot.edit_message(response, "First to select the reaction {} wins. Go!".format(reaction.emoji))
        start_time = timeit.default_timer()
        winner = await self.bot.wait_for_reaction(message = response, emoji = reaction.emoji)
        elapsed = timeit.default_timer() - start_time
        await self.bot.edit_message(response, "{} was the first to select {} and won with a time of {:.5} seconds!".format(winner.user.display_name, reaction.emoji, elapsed))
aesop.py (project: aesop, author: BioMoDeL)
def run(self):
        """Summary
        Perform a computational alanine scan on the initialized Alascan class.

        Returns
        -------
        None
            Outputs text to STDOUT when run is complete, will be made optional
            in the future.
        """
        start = ti.default_timer()
        self.logs = []
        self.genTruncatedPQR()
        self.calcAPBS()
        self.calcCoulomb()
        self.status = 1
        stop = ti.default_timer()
        print('%s:\tAESOP alanine scan completed in %.2f seconds' % (
            self.jobname, stop - start))
        warn = self.checkwarnings()
        err = self.checkerrors()
        if warn != 0:
            print('WARNINGS detected, please view log files!')
        if err != 0:
            print('ERRORS detected, please view log files!')
aesop.py (project: aesop, author: BioMoDeL)
def run(self):
        """Summary
        Perform a directed mutagenesis scan on the initialized class.

        Returns
        -------
        None
            Outputs text to STDOUT when run is complete, will be made optional
            in the future.
        """
        start = ti.default_timer()
        self.logs = []
        self.genPDB()
        self.genPQR()
        self.calcAPBS()
        self.calcCoulomb()
        stop = ti.default_timer()
        print('%s:\tAESOP directed mutagenesis scan completed'
              ' in %.2f seconds' % (self.jobname, stop - start))
        warn = self.checkwarnings()
        err = self.checkerrors()
        if warn != 0:
            print('WARNINGS detected, please view log files!')
        if err != 0:
            print('ERRORS detected, please view log files!')
speedtest.py (project: SmartSocks, author: waylybaye)
def run(self):
        request = self.request
        try:
            if ((timeit.default_timer() - self.starttime) <= self.timeout and
                    not SHUTDOWN_EVENT.isSet()):
                try:
                    f = urlopen(request)
                except TypeError:
                    # PY24 expects a string or buffer
                    # This also causes issues with Ctrl-C, but we will concede
                    # for the moment that Ctrl-C on PY24 isn't immediate
                    request = build_request(self.request.get_full_url(),
                                            data=request.data.read(self.size))
                    f = urlopen(request)
                f.read(11)
                f.close()
                self.result = sum(self.request.data.total)
            else:
                self.result = 0
        except (IOError, SpeedtestUploadTimeout):
            self.result = sum(self.request.data.total)
run_w2v.py (project: KATE, author: hugochan)
def train(args):
    vocab = load_json(args.vocab)
    # import pdb;pdb.set_trace()
    # load corpus
    corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False)
    # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
    # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
    # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False)
    # print len([1 for x in corpus])
    corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus)
    w2v = Word2Vec(args.n_dim, window=args.window_size, \
        negative=args.negative, epoches=args.n_epoch)

    start = timeit.default_timer()
    w2v.train(corpus_iter)
    print('runtime: %ss' % (timeit.default_timer() - start))

    save_w2v(w2v.model, args.save_model)
    # import pdb; pdb.set_trace()  # leftover debugging hook, disabled
stream_data.py (project: Wall-EEG, author: neurotechuoft)
def run(self):
        while True:
            # check FPS + listen for new connections
            new_tick = timeit.default_timer()
            elapsed_time = new_tick - self.tick
            current_samples_in = nb_samples_in
            current_samples_out = nb_samples_out
            print "--- at t: ", (new_tick - self.start_tick), " ---"
            print "elapsed_time: ", elapsed_time
            print "nb_samples_in: ", current_samples_in - self.nb_samples_in
            print "nb_samples_out: ", current_samples_out - self.nb_samples_out
            self.tick = new_tick
            self.nb_samples_in = nb_samples_in
            self.nb_samples_out = nb_samples_out
            # time to watch for connection
            # FIXME: not so great with threads
            server.check_connections()
            time.sleep(1)
csv_collect.py (project: Wall-EEG, author: neurotechuoft)
def __call__(self, sample):
        t = timeit.default_timer() - self.start_time

        # print timeSinceStart|Sample Id
        if self.verbose:
            print("CSV: %f | %d" % (t, sample.id))

        row = ''
        row += str(t)
        row += self.delim
        row += str(sample.id)
        row += self.delim
        for i in sample.channel_data:
            row += str(i)
            row += self.delim
        for i in sample.aux_data:
            row += str(i)
            row += self.delim
        # end the row (the trailing delimiter is left in place)
        row += '\n'
        with open(self.file_name, 'a') as f:
            f.write(row)
mixins.py (project: Eskapade, author: KaveIO)
def stop_timer(self, start_time=None):
        """Stop the run timer

        Stop the timer.  The timer is used to compute the run time.  The
        elapsed time since the timer start is returned.

        :param float start_time: function start_time input
        :returns: time difference with start in seconds
        :rtype: float
        """

        self._stop_time = timeit.default_timer()

        diff_time = self._stop_time - (start_time if start_time is not None else self._start_time)
        self._total_time += diff_time

        return diff_time
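
The mixin shows only the stop side; it assumes a matching start_timer that stores timeit.default_timer() in self._start_time. A minimal sketch of that counterpart, hypothetical rather than the project's exact code:

import timeit

def start_timer(self):
    """Start the run timer (hypothetical counterpart to stop_timer)."""
    self._start_time = timeit.default_timer()
    return self._start_time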
borgbench.py (project: borgbench, author: dragetd)
def runConfig(comp, cmin, cmax, cavg, data):
    # make sure there is no leftover repository. This will throw a warning on the shell if there is no folder, but it can be ignored
    subprocess.call(["rm", "-r", "/tmp/borgbench/"+comp])
    # run borg
    subprocess.call(["borg", "init", "-e", "none", "/tmp/borgbench/"+comp])
    start = timer()
    proc = subprocess.Popen(["borg", "create", "/tmp/borgbench/"+comp+"::test", "-v", "-s", "-C", comp, data], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.stderr.read()
    duration = timer() - start
    # parse output
    m = re.match(r".*This archive: +(\d+\.?\d+ ..) +(\d+\.?\d+ ..) +(\d+\.?\d+ ..).*Chunk index: +(\d+) +(\d+)", str(output))
    if m:
        print(comp+";"+str(cmin)+";"+str(cmax)+";"+str(cavg)+";"+m.group(1)+";"+m.group(2)+";"+m.group(3)+";"+m.group(4)+";"+m.group(5)+";"+str(duration))
    else:
        print("Error")
    # and clean up
    subprocess.call(["rm", "-r", "/tmp/borgbench/"+comp])


# Benchmark calls
# For speed reasons, this should be a tmpfs
smell_datamine_multiprocessing.py (project: Smelly-London, author: Smelly-London)
def main():

    start = timer()
    files = get_file_names()
    smell_results = []

    bar = progressbar.ProgressBar(max_value=len(files))
    processed_files = 0
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for file, smell in zip(files, executor.map(worker, files)):
            smell_results = smell_results + smell
            processed_files += 1
            bar.update(processed_files)
    smell_results = [x for x in smell_results if x]

    end = timer()
    print(end - start)
    dataminer = SmellDataMine()
    dataminer.save_to_database(smell_results)
SplunkDeployer.py (project: QXSConsolas, author: qxsch)
def remove(self, app):
        self._affectedServers = {}
        self.app = app
        if not "--env:" in self.app.options:
            self.app.options["--env:"] = "ALL"
        else:
            self.app.options["--env:"] = self.app.options["--env:"].upper()

        if "ALL"  in self.app.options['--app:']:
            raise AppNotFoundException("Cannot create an app called ALL, because it is a reserved word")

        t = timeit.default_timer()

        ssh = SSH()
        for appname in self.app.options['--app:']:
            self._removeApp(ssh, appname)

        t = timeit.default_timer() - t
        if t < self.WarnDeploymentTime:
            self.app.logger.info("Removal took: {:.4f} seconds".format(t))
        else:
            self.app.logger.warning("Removal took: {:.4f} seconds".format(t))

        self._deleteInventoryEntries()
SplunkDeployer.py (project: QXSConsolas, author: qxsch)
def backup(self, app):
        self._affectedServers = {}
        self.app = app
        self._checkNodeConfiguration()
        ssh = SSH()

        t = timeit.default_timer()

        envs = self.app.configuration.get("SplunkNodes.envs")
        role = self._roles[envs[self.app.options["--env:"]][self.app.options["--role:"]]["role"]]
        role.setRoleInfo(self.app.logger, self.app.options["--env:"], envs[self.app.options["--env:"]], self.app.options["--role:"], envs[self.app.options["--env:"]][self.app.options["--role:"]])
        self.app.logger.info("Taking a backup for the selected apps (" + ", ".join(self.app.options["--app:"]) + ") from environment \"" + self.app.options["--env:"] + "\" and role \"" + self.app.options["--role:"] + "\" to local path \"" + self.app.options["--path:"] + "\"")
        role.backup(list(self.app.options["--app:"]), ssh, self.app.options["--path:"])

        t = timeit.default_timer() - t
        if t < self.WarnDeploymentTime:
            self.app.logger.info("Backup took: {:.4f} seconds".format(t))
        else:
            self.app.logger.warning("Backup took: {:.4f} seconds".format(t))
SplunkDeployer.py (project: QXSConsolas, author: qxsch)
def restore(self, app):
        self._affectedServers = {}
        self.app = app
        self._checkNodeConfiguration()
        ssh = SSH()

        t = timeit.default_timer()

        envs = self.app.configuration.get("SplunkNodes.envs")
        for appName in self.app.options["--app:"]:
            assert os.path.exists(os.path.join(self.app.options["--path:"], appName)), "The app \"" + appName + "\" does not exist under: " + self.app.options["--path:"]

        role = self._roles[envs[self.app.options["--env:"]][self.app.options["--role:"]]["role"]]
        role.setRoleInfo(self.app.logger, self.app.options["--env:"], envs[self.app.options["--env:"]], self.app.options["--role:"], envs[self.app.options["--env:"]][self.app.options["--role:"]])
        self.app.logger.info("Restoring a backup for the selected apps (" + ", ".join(self.app.options["--app:"]) + ") from local path \"" + self.app.options["--path:"] + "\" to environment \"" + self.app.options["--env:"] + "\" and role \"" + self.app.options["--role:"] + "\"")
        role.restore(list(self.app.options["--app:"]), ssh, self.app.options["--path:"])

        t = timeit.default_timer() - t
        if t < self.WarnDeploymentTime:
            self.app.logger.info("Restore took: {:.4f} seconds".format(t))
        else:
            self.app.logger.warning("Restore took: {:.4f} seconds".format(t))
compress.py (project: tiny-png, author: Waterstrong)
def compress_images(target_images):
    current = 0
    total_number = len(target_images)
    total_time = 0
    for image_file in target_images:
        current += 1
        write_log('Start compressing image: {}'.format(realpath(image_file)))
        if os.path.exists(image_file):
            time_start = timeit.default_timer()
            tinify_image(image_file)
            time_diff = round(timeit.default_timer() - time_start, 2)
            total_time += time_diff
            write_log('Compression done in {} seconds! ({}/{})\n'.format(time_diff, current, total_number))
        else:
            write_log('Ignored: target image does not exist! ({}/{})\n'.format(current, total_number))
    if total_time > 0:
        write_log('Took {} seconds in total to complete!'.format(total_time))
poolImprovement.py (project: Learning-Concurrency-in-Python, author: PacktPublishing)
def main():

    t1 = timeit.default_timer()
    with ProcessPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))

    print("{} Seconds Needed for ProcessPoolExecutor".format(timeit.default_timer() - t1))

    t2 = timeit.default_timer()
    with ThreadPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))
    print("{} Seconds Needed for ThreadPoolExecutor".format(timeit.default_timer() - t2))

    t3 = timeit.default_timer()
    for number in PRIMES:
        isPrime = is_prime(number)
        print("{} is prime: {}".format(number, isPrime))
    print("{} Seconds needed for single threaded execution".format(timeit.default_timer() - t3))
base.py (project: scikit-kge, author: mnick)
def _optim(self, xys):
        idx = np.arange(len(xys))
        self.batch_size = int(np.ceil(len(xys) / self.nbatches))  # int: np.split needs integer indices
        batch_idx = np.arange(self.batch_size, len(xys), self.batch_size)

        for self.epoch in range(1, self.max_epochs + 1):
            # shuffle training examples
            self._pre_epoch()
            shuffle(idx)

            # store epoch for callback
            self.epoch_start = timeit.default_timer()

            # process mini-batches
            for batch in np.split(idx, batch_idx):
                # select indices for current batch
                bxys = [xys[z] for z in batch]
                self._process_batch(bxys)

            # run the post-epoch callbacks; stop invoking them once one returns False
            for f in self.post_epoch:
                if not f(self):
                    break
jointanalyzer.py (project: tomato, author: sertansenturk)
def filter_pitch(self, pitch, aligned_notes):
        tic = timeit.default_timer()
        self.vprint(u"- Filtering predominant melody of {0:s} after "
                    u"audio-score alignment.".format(pitch['source']))
        aligned_notes_ = [IO.dict_keys_to_camel_case(n)
                          for n in deepcopy(aligned_notes)]

        pitch_temp, notes_filtered, synth_pitch = \
            self._aligned_pitch_filter.filter(pitch['pitch'], aligned_notes_)

        notes_filtered = [IO.dict_keys_to_snake_case(n)
                          for n in notes_filtered]

        pitch_filtered = deepcopy(pitch)
        pitch_filtered['pitch'] = pitch_temp
        pitch_filtered['citation'] = 'SenturkThesis'
        pitch_filtered['procedure'] = 'Pitch filtering according to ' \
                                      'audio-score alignment'

        # print elapsed time, if verbose
        self.vprint_time(tic, timeit.default_timer())

        return pitch_filtered, notes_filtered
jointanalyzer.py (project: tomato, author: sertansenturk)
def compute_note_models(self, pitch, aligned_notes, tonic_symbol):
        tic = timeit.default_timer()
        self.vprint(u"- Computing the note models for {0:s}".
                    format(pitch['source']))

        aligned_notes_ = [IO.dict_keys_to_camel_case(n)
                          for n in deepcopy(aligned_notes)]

        note_models, pitch_distribution, tonic = self._aligned_note_model.\
            get_models(pitch['pitch'], aligned_notes_, tonic_symbol)

        for note in note_models.keys():
            note_models[note] = IO.dict_keys_to_snake_case(
                note_models[note])

        tonic = IO.dict_keys_to_snake_case(tonic['alignment'])
        tonic['source'] = pitch['source']

        # print elapsed time, if verbose
        self.vprint_time(tic, timeit.default_timer())
        return note_models, pitch_distribution, tonic
audioanalyzer.py (project: tomato, author: sertansenturk)
def compute_melodic_progression(self, pitch):
        tic = timeit.default_timer()
        self.vprint(u"- Computing the melodic progression model of {0:s}"
                    .format(pitch['source']))

        if self._mel_prog_params['frame_dur'] is None:
            # compute number of frames from some simple "rule of thumb"
            duration = pitch['pitch'][-1][0]
            frame_dur = duration / self._mel_prog_params['min_num_frames']
            frame_dur = int(5 * round(float(frame_dur) / 5))  # round to 5sec

            # force to be between 5 and max_frame_dur
            if frame_dur < 5:
                frame_dur = 5
            elif frame_dur > self._mel_prog_params['max_frame_dur']:
                frame_dur = self._mel_prog_params['max_frame_dur']
        else:
            frame_dur = self._mel_prog_params['frame_dur']

        melodic_progression = self._melodic_progression_analyzer.analyze(
            pitch['pitch'], frame_dur=frame_dur,
            hop_ratio=self._mel_prog_params['hop_ratio'])
        self.vprint_time(tic, timeit.default_timer())

        return melodic_progression
bench.py (project: pymapd, author: mapd)
def benchmark(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        warmup = kwargs.pop('warmup', False)

        if warmup:
            func(*args, **kwargs)

        kind = args[0]
        t0 = timer()
        try:
            result = func(*args, **kwargs)
        except Exception:
            logger.warning("finished,%s,%s,%s", func.__name__, kind,
                           float('nan'))
        else:
            t1 = timer()
            logger.info("finished,%s,%s,%s", func.__name__, kind, t1 - t0)
            return result

    _benchmarks.append(wrapper)
    return wrapper
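
A usage sketch; the decorated function and its body are illustrative, not from pymapd. The first positional argument doubles as the label that appears in the log line, and warmup=True triggers one untimed call before the timed one:

@benchmark
def square_sum(kind):
    return sum(i * i for i in range(10 ** 5))

square_sum('list', warmup=True)  # one warmup call, then the timed, logged call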
camdector.py (project: nimo, author: wolfram2012)
def detect(self, det_iter, show_timer=False):

        num_images = det_iter._size
        # if not isinstance(det_iter, mx.io.PrefetchingIter):
        #     det_iter = mx.io.PrefetchingIter(det_iter)
        start = timer()
        detections = self.mod.predict(det_iter).asnumpy()
        time_elapsed = timer() - start
        if show_timer:
            print("Detection time for {} images: {:.4f} sec".format(
                num_images, time_elapsed))
        result = []
        for i in range(detections.shape[0]):
            det = detections[i, :, :]
            res = det[np.where(det[:, 0] >= 0)[0]]
            result.append(res)
        return result
k_means_kdd.py (project: dask-ml, author: dask)
def fit(data, use_scikit_learn=False):
    logger.info("Starting to cluster")
    # Cluster
    n_clusters = 8
    oversampling_factor = 2
    if use_scikit_learn:
        km = sk.KMeans(n_clusters=n_clusters, random_state=0)
    else:
        km = KMeans(n_clusters=n_clusters,
                    oversampling_factor=oversampling_factor,
                    random_state=0)
    t0 = tic()
    logger.info("Starting n_clusters=%2d, oversampling_factor=%2d",
                n_clusters, oversampling_factor)
    km.fit(data)
    t1 = tic()
    logger.info("Finished in %.2f", t1 - t0)
test_daemon.py (project: krafters, author: GianlucaBortoli)
def rethinkdb_append_entry(connection):

    global ITERATION
    value = {"id": DEFAULT_VALUE, "value": ITERATION}


    try:
        t = timeit.default_timer()
        r.table(RETHINKDB_TABLE_NAME).insert(value, conflict='replace').run(connection, durability="hard", read_mode='majority')
        v = r.table(RETHINKDB_DB_NAME, read_mode='majority').run(connection, durability="hard", read_mode='majority')
        ITERATION += 1
        ITERATION %= 100
        logging.info('key added')
    except Exception:
        logging.error('{} not added'.format(value))
    finally:
        return timeit.default_timer() - t
callbacks.py (project: ngraph, author: NervanaSystems)
def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.train_pre_:
            self.total_iterations = callback_data['config'].attrs['total_iterations']
            num_intervals = self.total_iterations // self.frequency
            for loss_name in self.interval_loss_comp.output_keys:
                callback_data.create_dataset("cost/{}".format(loss_name), (num_intervals,))
            callback_data.create_dataset("time/loss", (num_intervals,))
        elif phase == CallbackPhase.train_post:
            losses = loop_eval(self.dataset, self.interval_loss_comp)
            tqdm.write("Training complete.  Avg losses: {}".format(losses))
        elif phase == CallbackPhase.minibatch_post and ((idx + 1) % self.frequency == 0):
            start_loss = default_timer()
            interval_idx = idx // self.frequency

            losses = loop_eval(self.dataset, self.interval_loss_comp)

            for loss_name, loss in losses.items():
                callback_data["cost/{}".format(loss_name)][interval_idx] = loss

            callback_data["time/loss"][interval_idx] = (default_timer() - start_loss)
            tqdm.write("Interval {} Iteration {} complete.  Avg losses: {}".format(
                interval_idx + 1, idx + 1, losses))
network_analyzer.py (project: pyrpl, author: lneuhaus)
def _new_point_arrived(self, point):
        if self._paused:
            return
        self._update_benchmark()
        try:
            point = point.result()
        except CancelledError:
            self._point_cancelled()
            return  # exit the loop (could be restarted later for RunFuture)
        self._add_point(point)

        # if zero span mode, data_x is time measured, not frequency
        if self._module.is_zero_span():
            if self.current_avg == 1:
                time_now = timeit.default_timer() - self._time_first_point
                self.data_x[self.current_point] = time_now
                self._module._data_x[self.current_point] = time_now

        self.current_point += 1
        if self.current_point == self.n_points:
            self._scan_finished()
        else:
            self._setup_next_point()
linear_fft_pipeline.py (project: bifrost, author: ledatelescope)
def run_benchmark(self):
        with bf.Pipeline() as pipeline:
            datafile = "numpy_data0.bin"

            bc = bf.BlockChainer()
            bc.blocks.binary_read(
                    [datafile], gulp_size=GULP_SIZE, gulp_nframe=GULP_FRAME, dtype='cf32')
            bc.blocks.copy('cuda', gulp_nframe=GULP_FRAME)
            for _ in range(NUMBER_FFT):
                bc.blocks.fft(['gulped'], axis_labels=['ft_gulped'], gulp_nframe=GULP_FRAME_FFT)
                bc.blocks.fft(['ft_gulped'], axis_labels=['gulped'], inverse=True, gulp_nframe=GULP_FRAME_FFT)

            start = timer()
            pipeline.run()
            end = timer()
            self.total_clock_time = end - start
pipeline_benchmarker.py (project: bifrost, author: ledatelescope)
def timeit(self, method):
        """ Decorator for timing execution of a method 

        Returns:

            function: the original function, wrapped
                       with a time accumulator
        """
        def timed(*args, **kw):
            ts = timer()
            result = method(*args, **kw)
            te = timer()

            self.relevant_clock_time += te - ts
            return result
        return timed
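
A usage sketch under assumed names (the benchmarker class and its run method are hypothetical): wrapping a bound method by hand makes every later call add its duration to relevant_clock_time:

bench = PipelineBenchmarker()        # hypothetical class defining timeit() above
bench.run = bench.timeit(bench.run)  # wrap the bound method
bench.run()
print(bench.relevant_clock_time)     # seconds accumulated across wrapped calls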
driver.py (project: k8scntkSamples, author: weehyong)
def init():
    """ Initialise ResNet 152 model
    """
    global trainedModel, labelLookup, mem_after_init

    start = t.default_timer()

    # Load the model and labels from disk
    with open('synset.txt', 'r') as f:
        labelLookup = [l.rstrip() for l in f]

    # Load the BrainScript model and keep only its output at index 3
    trainedModel = load_model('ResNet_152.model')
    trainedModel = combine([trainedModel.outputs[3].owner])
    end = t.default_timer()

    loadTimeMsg = "Model loading time: {0} ms".format(round((end-start)*1000, 2))
    logger.info(loadTimeMsg)
utils.py (project: yadll, author: pchavanne)
def timer(what_to_show="Function execution"):
    """
    Decorator that sends the execution time of the decorated function to the logger.

    Parameters
    ----------
    what_to_show : `string`, optional
        message displayed after execution

    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start_time = timeit.default_timer()
            res = func(*args, **kwargs)
            end_time = timeit.default_timer()
            s = end_time - start_time
            try:
                msg = what_to_show + ' ' + args[0].name
            except (AttributeError, IndexError, TypeError):
                msg = what_to_show
            logger.info('%s took %s' % (msg, format_sec(s)))
            return res
        return wrapper
    return func_wrapper
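
A usage sketch (train_model is illustrative). Because the wrapped call takes no arguments, the IndexError fallback keeps the plain what_to_show message, and the elapsed time is rendered by the module's format_sec helper:

@timer(what_to_show='Training')
def train_model():
    return sum(range(10 ** 6))

train_model()  # logged as: "Training took <formatted duration>"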

