Python time.process_time() usage examples
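
time.process_time() returns the sum of the system and user CPU time of the current process, as a float in fractional seconds; time spent sleeping or blocked does not count, which makes it the right clock for measuring pure computation. A minimal sketch (not from any of the projects below) contrasting it with the wall-clock time.perf_counter():

import time

def busy(n=10**6):
    # Pure computation: charged to the process CPU clock.
    return sum(i * i for i in range(n))

cpu0, wall0 = time.process_time(), time.perf_counter()
busy()
time.sleep(0.5)  # costs wall-clock time but (almost) no CPU time
print('CPU: %.3f s, wall: %.3f s'
      % (time.process_time() - cpu0, time.perf_counter() - wall0))

The snippets below, collected from open-source projects, show the same pattern at larger scale: take a reading before and after a piece of work and report the difference.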

tf_manager.py (project: neuralmonkey, author: ufal)
def execute(self,
                dataset: Dataset,
                execution_scripts,
                train=False,
                compute_losses=True,
                summaries=True,
                batch_size=None,
                log_progress: int = 0) -> List[ExecutionResult]:
        if batch_size is None:
            batch_size = len(dataset)
        batched_dataset = dataset.batch_dataset(batch_size)
        last_log_time = time.process_time()

        batch_results = [
            [] for _ in execution_scripts]  # type: List[List[ExecutionResult]]
        for batch_id, batch in enumerate(batched_dataset):
            if (time.process_time() - last_log_time > log_progress
                    and log_progress > 0):
                log("Processed {} examples.".format(batch_id * batch_size))
                last_log_time = time.process_time()
            executables = [s.get_executable(compute_losses=compute_losses,
                                            summaries=summaries,
                                            num_sessions=len(self.sessions))
                           for s in execution_scripts]

            while any(ex.result is None for ex in executables):
                self._run_executables(batch, executables, train)

            for script_list, executable in zip(batch_results, executables):
                script_list.append(executable.result)

        collected_results = []  # type: List[ExecutionResult]
        for result_list in batch_results:
            collected_results.append(reduce_execution_results(result_list))

        return collected_results
__init__.py (project: cython-workshop, author: hroncok)
def solve(impl='python'):
    if impl == 'cython':
        solvercls = csolver.CBruteSolver
    else:
        solvercls = solver.BruteSolver
    try:
        os.mkdir('data/' + impl)
    except FileExistsError:
        pass
    for filename in sorted(glob.glob('data/*.inst.dat')):
        print(filename)
        loaded_data = list(dataloader.load_input(filename))
        count = loaded_data[0]['count']
        correct = list(dataloader.load_provided_results(
            'data/knap_{0:02d}.sol.dat'.format(count)))
        outname = filename.replace('.inst.dat', '.results.jsons')
        outname = outname.replace('data/', 'data/' + impl + '/')
        with open(outname, 'w') as f:
            filestartime = time.process_time()
            for idx, backpack in enumerate(loaded_data):
                startime = time.process_time()
                s = solvercls(backpack)
                backpack['maxcombo'], backpack['maxcost'] = s.solve()
                endtime = time.process_time()
                delta = endtime - startime
                backpack['time'] = delta
                assert backpack['maxcost'] == correct[idx]['maxcost']
                del backpack['items']
                f.write(json.dumps(backpack) + '\n')
            fileendtime = time.process_time()
            delta = fileendtime - filestartime
            f.write('{}\n'.format(delta))
__init__.py (project: CryExport, author: britalmeida)
def save_file(operator, context, filepath="", use_selection=False, **kwargs):

    print('CryEngine export starting... %r' % filepath)
    start_time = time.process_time()
    try:
        file = open(filepath, "w", encoding="utf8", newline="\n")
    except Exception:
        import traceback
        traceback.print_exc()
        operator.report({'ERROR'}, "Couldn't open file %r" % filepath)
        return {'CANCELLED'}

    fw = file.write

    fw('hello')

    file.close()

    # copy all collected files.
    #bpy_extras.io_utils.path_reference_copy(copy_set)

    print('export finished in %.4f sec.' % (time.process_time() - start_time))
    return {'FINISHED'}


# UI ##########################################################################
profile.py (project: ouroboros, author: pybee)
def __init__(self, timer=None, bias=None):
        self.timings = {}
        self.cur = None
        self.cmd = ""
        self.c_func_name = ""

        if bias is None:
            bias = self.bias
        self.bias = bias     # Materialize in local dict for lookup speed.

        if not timer:
            self.timer = self.get_time = time.process_time
            self.dispatcher = self.trace_dispatch_i
        else:
            self.timer = timer
            t = self.timer() # test out timer function
            try:
                length = len(t)
            except TypeError:
                self.get_time = timer
                self.dispatcher = self.trace_dispatch_i
            else:
                if length == 2:
                    self.dispatcher = self.trace_dispatch
                else:
                    self.dispatcher = self.trace_dispatch_l
                # This get_time() implementation needs to be defined
                # here to capture the passed-in timer in the parameter
                # list (for performance).  Note that we can't assume
                # the timer() result contains two values in all
                # cases.
                def get_time_timer(timer=timer, sum=sum):
                    return sum(timer())
                self.get_time = get_time_timer
        self.t = self.get_time()
        self.simulate_call('profiler')

    # Heavily optimized dispatch routine for os.times() timer
test_time.py (project: ouroboros, author: pybee)
def test_process_time(self):
        # process_time() should not include time spent during a sleep
        start = time.process_time()
        time.sleep(0.100)
        stop = time.process_time()
        # use 20 ms because process_time() usually has a resolution of 15 ms
        # on Windows
        self.assertLess(stop - start, 0.020)

        info = time.get_clock_info('process_time')
        self.assertTrue(info.monotonic)
        self.assertFalse(info.adjustable)
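
The 15 ms resolution mentioned in the comment is a Windows detail; the implementation and resolution of the clock vary by platform and can be inspected at runtime with the same time.get_clock_info() call the test uses:

import time

info = time.get_clock_info('process_time')
# On Linux this typically reports clock_gettime(CLOCK_PROCESS_CPUTIME_ID)
# with nanosecond resolution; Windows reports a much coarser clock.
print(info.implementation, info.resolution, info.monotonic, info.adjustable)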
basic_tests.py (project: py-prng, author: czechnology)
def run_all(generator, n_bits, sig_level, continuous=False, print_log=False):
    # if we want all the tests to be applied to the *same* bit sequence,
    # we need to pre-compute it and create a static generator
    if not continuous:
        ts = time()
        sequence = generator.random_bytes((n_bits // 8) + 16)
        print(sequence)
        generator = StaticSequenceGenerator(seq=sequence)
        if print_log:
            print("(Sequence pre-computed in", nicer_time(time() - ts) + ')', flush=True)

    if not continuous:
        generator.rewind()  # rewind
    tf = frequency_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    ts = serial_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    tp = poker_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    tr = runs_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    tac = autocorrelation_test(generator, n_bits, d=100, sig_level=sig_level)

    return tf, ts, tp, tr, tac
Fusion360DebugUtilities.py (project: FusionVenter, author: tapnair)
def perf_log(log, function_reference, command, identifier=''):
    log.append((function_reference, command, identifier, time.process_time()))
Timer.py (project: swarmops, author: Hvass-Labs)
def __init__(self):
        """
        Start the timer.

        :return: Object instance.
        """

        # Note that time.process_time() doesn't work with multiprocessing.

        self.start_time = time.time()
        self.end_time = self.start_time
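
The comment above is worth unpacking: process_time() is per-process, so CPU time spent inside multiprocessing workers is charged to the workers' own clocks and never shows up in the parent's reading, which is why this Timer falls back to wall-clock time.time(). A small sketch of the discrepancy (busy_work is a hypothetical stand-in for real work):

import time
from multiprocessing import Pool

def busy_work(n):
    # CPU-bound helper; its CPU time is charged to the worker process.
    return sum(i * i for i in range(n))

if __name__ == '__main__':
    cpu0, wall0 = time.process_time(), time.time()
    with Pool(4) as pool:
        pool.map(busy_work, [10**6] * 8)
    # The parent's CPU delta stays near zero even though the workers
    # did real work; only the wall-clock delta reflects it.
    print('parent CPU: %.3f s, wall: %.3f s'
          % (time.process_time() - cpu0, time.time() - wall0))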
concurrency_sample.py (project: cps2-gfx-editor, author: goosechooser)
def pool_sprites(filepath):
    log = logging.getLogger('pool_sprites')
    #log.setLevel(logging.INFO)

    sprites = helper.fromlua(filepath)
    filename = filepath.split("/")[2]

    log.info("starting %s", filepath)
    time_point1 = time.process_time()

    for i, sprite in enumerate(sprites):
        tiles2d = tile_printer.make_tiles_mmap(GFX_MM, sprite.addrs2d())
        sprites[i].tiles = helper.flatten_list(tiles2d)

    time_point2 = time.process_time()
    delta_t = time_point2 - time_point1
    #log.info("making sprites took %s to complete", delta_t)

    time_point3 = time.process_time()
    put_sprites(sprites, OUTPUT_FOLDER + filename[:-4])
    time_point4 = time.process_time()

    delta_t2 = time_point4 - time_point3
    #log.info("putting sprites took %s to complete", delta_t2)
    log.info("ending %s", filepath)
    return delta_t, delta_t2
zmirror.py (project: zmirror, author: aploium)
def generate_our_response():
    """
    Generate our response.
    :rtype: Response
    """
    # copy and parse remote response
    resp = copy_response(is_streamed=parse.streamed_our_response)

    if parse.time["req_time_header"] >= 0.00001:
        parse.set_extra_resp_header('X-Header-Req-Time', "%.4f" % parse.time["req_time_header"])
    if parse.time.get("start_time") is not None and not parse.streamed_our_response:
        # remote request time should be excluded when calculating total time
        parse.set_extra_resp_header('X-Body-Req-Time', "%.4f" % parse.time["req_time_body"])
        parse.set_extra_resp_header('X-Compute-Time',
                                    "%.4f" % (process_time() - parse.time["start_time"]))

    parse.set_extra_resp_header('X-Powered-By', 'zmirror/%s' % CONSTS.__VERSION__)

    if developer_dump_all_traffics and not parse.streamed_our_response:
        dump_zmirror_snapshot("traffic")

    return resp
stl_utils.py (project: bpy_lambda, author: bcongdon)
def read_stl(filepath):
    """
    Return the triangles and points of an stl binary file.

    Please note that this process can take a lot of time if the file is
    huge (~1m30 for a 1 GB stl file on a quad-core i7).

    - returns a tuple(triangles, triangles' normals, points).

      triangles
          A list of triangles, each triangle as a tuple of 3 index of
          point in *points*.

      triangles' normals
          A list of vectors3 (tuples, xyz).

      points
          An indexed list of points, each point is a tuple of 3 float
          (xyz).

    Example of use:

       >>> tris, tri_nors, pts = read_stl(filepath)
       >>> pts = list(pts)
       >>>
       >>> # print the coordinates of triangle n
       >>> print([pts[i] for i in tris[n]])
    """
    import time
    start_time = time.process_time()

    tris, tri_nors, pts = [], [], ListDict()

    with open(filepath, 'rb') as data:
        # check for ascii or binary
        gen = _ascii_read if _is_ascii_file(data) else _binary_read

        for nor, pt in gen(data):
            # Add the triangle and the point.
            # If the point is already in the list of points, the
            # index returned by pts.add() will be the one from the
            # first equal point inserted.
            tris.append([pts.add(p) for p in pt])
            tri_nors.append(nor)

    print('Import finished in %.4f sec.' % (time.process_time() - start_time))

    return tris, tri_nors, pts.list
console.py (project: fg21sim, author: liweitianux)
def _task_default(self, **kwargs):
        """
        The default task that this console manages, which performs
        the foregrounds simulations.

        Returns
        -------
        success : bool
            Whether the task finished successfully.
        error : str
            Error message if the task failed

        NOTE
        ----
        The task is synchronous and may be computationally intensive
        (i.e., CPU-bound rather than IO/event-bound), therefore,
        threads (or processes) are required to make it non-blocking
        (i.e., asynchronous).

        References:
        [1] https://stackoverflow.com/a/32164711/4856091
        """
        t1_start = time.perf_counter()
        t2_start = time.process_time()
        logger.info("Console DEFAULT task: START ...")
        logger.info("Preparing to start foregrounds simulations ...")
        logger.info("Checking the configurations ...")
        self.configs.check_all()
        #
        logger.info("Importing modules + Numba JIT, waiting ...")
        from ...foregrounds import Foregrounds
        #
        fg = Foregrounds(self.configs)
        fg.preprocess()
        fg.simulate()
        fg.postprocess()
        logger.info("Foregrounds simulations DONE!")
        logger.info("Console DEFAULT task: DONE!")
        t1_stop = time.perf_counter()
        t2_stop = time.process_time()
        logger.info("Elapsed time: {0:.3f} (s)".format(t1_stop - t1_start))
        logger.info("CPU process time: {0:.3f} (s)".format(t2_stop - t2_start))
        # NOTE: always return a tuple of (success, error)
        return (True, None)
experiments.py (project: oasis, author: ngmarchant)
def repeat_expt(smplr, n_expts, n_labels, output_file = None):
    """
    Parameters
    ----------
    smplr : sub-class of PassiveSampler
        sampler must have a sample_distinct method, reset method and ...

    n_expts : int
        number of expts to run

    n_labels : int
        number of labels to query from the oracle in each expt
    """

    FILTERS = tables.Filters(complib='zlib', complevel=5)

    max_iter = smplr._max_iter
    n_class = smplr._n_class
    if max_iter < n_labels:
        raise ValueError("Cannot query {} labels. Sampler ".format(n_labels) +
                         "instance supports only {} iterations".format(max_iter))

    if output_file is None:
        # Use current date/time as filename
        output_file = 'expt_' + time.strftime("%d-%m-%Y_%H:%M:%S") + '.h5'
    logging.info("Writing output to {}".format(output_file))

    f = tables.open_file(output_file, mode='w', filters=FILTERS)
    float_atom = tables.Float64Atom()
    bool_atom = tables.BoolAtom()
    int_atom = tables.Int64Atom()

    array_F = f.create_carray(f.root, 'F_measure', float_atom, (n_expts, n_labels, n_class))
    array_s = f.create_carray(f.root, 'n_iterations', int_atom, (n_expts, 1))
    array_t = f.create_carray(f.root, 'CPU_time', float_atom, (n_expts, 1))

    logging.info("Starting {} experiments".format(n_expts))
    for i in range(n_expts):
        if i%np.ceil(n_expts/10).astype(int) == 0:
            logging.info("Completed {} of {} experiments".format(i, n_expts))
        ti = time.process_time()
        smplr.reset()
        smplr.sample_distinct(n_labels)
        tf = time.process_time()
        if hasattr(smplr, 'queried_oracle_'):
            array_F[i,:,:] = smplr.estimate_[smplr.queried_oracle_]
        else:
            array_F[i,:,:] = smplr.estimate_
        array_s[i] = smplr.t_
        array_t[i] = tf - ti
    f.close()

    logging.info("Completed all experiments")
Policy.py (project: MDP_GridWorld, author: abdalmoniem)
def valueIteration(self, debugCallback = None, turbo = False):
        '''Using the value iteration algorithm (see AI: A Modern Approach (Third ed.), p. 652),
           calculate the utilities for all states in the grid world.

           The debugCallback must be a function that takes two parameters:
                policy: which the function can use to display intermediate results
                isEnded: which the function can use to know if the value iteration has ended
           The debugCallback must return True to continue, and can stop the algorithm by
           returning False.

           The algorithm has a maximum number of iterations, so that even an example with
           a discount factor of 1 terminates.

           The turbo mode updates the utility vector in place, so values already computed
           during the i-th sweep are reused later in the same sweep (Gauss-Seidel style).
           The classic approach instead computes the whole i-th vector from the (i-1)-th
           vector. With the turbo mode we measured an improvement of about 30%.

           Returns the number of iterations needed to converge.
        '''
        eps = Policy.valueIterationEpsilon
        dfact = self.world.discFactor
        c, r = self.world.size
        if turbo: newUv = self.utilities

        reiterate = True
        start = time.process_time()
        while(reiterate):
            self.numOfIterations += 1
            maxNorm = 0  # see the max norm definition in AI: A Modern Approach (Third ed.), p. 654

            if not turbo: newUv = self.__createEmptyUtilityVector()

            for x in range(c):
                for y in range(r):
                    v = self.__cellUtility(x, y) #calculate using the self.utilities (i.e. the previous step)
                    if v is not None: maxNorm = max(maxNorm, abs(self.utilities[y][x] - v))
                    newUv[y][x] = v #update the new utility vector that we are creating

            if not turbo: self.utilities = newUv

            if debugCallback: reiterate = debugCallback(self, False)

            if maxNorm <= eps * (1 - dfact)/dfact: reiterate = False

            end = time.process_time()
            self.elapsed = end - start
            if self.numOfIterations >= Policy.maxNumberOfIterations or self.elapsed > Policy.timeToLive:
                reiterate = False
                print("warning: max number of iterations exceeded")
                messagebox.showwarning("Warning", "max number of iterations exceeded")

        if debugCallback: reiterate = debugCallback(self, True)

        return self.numOfIterations
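
The stopping test maxNorm <= eps * (1 - dfact)/dfact above is the standard value-iteration termination bound (AIMA, 3rd ed., p. 654): with discount factor \gamma < 1, a small max-norm change between successive utility vectors guarantees closeness to the true utilities,

\|U_{i+1} - U_i\|_\infty < \epsilon (1 - \gamma) / \gamma
    \;\Longrightarrow\; \|U_{i+1} - U^*\|_\infty < \epsilon

which is why the loop can safely exit as soon as the condition holds.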
__init__.py (project: ablator, author: ablator)
def which(client_user: ClientUser, functionality: Functionality) -> Optional[Availability]:
    """
    Which Flavor of the given Functionality is enabled for the user, if any?

    Returns a Flavor object that corresponds to the ClientUser's enabled functionality,
    or `None` if the user does not have any Flavor in the given Functionality.

    Use ClientUser.user_from_object to get or create a ClientUser instance from any hashable
    object (usually a string).
    """
    context = WhichContext()
    context.client_user = client_user
    context.functionality = functionality

    pipeline = [
        # roll out strategies
        check_roll_out_recall,
        check_roll_out_enable_globally,

        # retrieve availability
        get_availability,

        # check availability and switch on based on max user count
        check_for_existing_enabled_availability,
        assert_roll_out_is_not_paused,
        assert_existence_of_release,
        assert_existence_of_flavors,
        get_enabled_count,
        create_new_availability_with_random_flavor,
        enable_availability_by_user_count,
    ]

    # Go through each function in the pipeline. If it yields an Availability, we're done
    # and can return it. Otherwise, continue until we hit the end, or catch a NoAvailability
    # exception.
    # Splitting the methods up like this helps with testing, caching, and gaining an overview over
    # what actually happens through logging. Hopefully.
    start_time = time.process_time()
    for func in pipeline:
        try:
            av = func(context)
            if av:
                save_request_log_entry(
                    str(context.functionality.id),
                    str(av.flavor_id),
                    func.__name__,
                    client_user.id,
                    time.process_time() - start_time
                )
                return av
        except NoAvailability:
            save_request_log_entry(
                str(context.functionality.id),
                None,
                func.__name__,
                client_user.id,
                time.process_time() - start_time
            )
            return None
    return None
models_siamese.py (project: gcn_metric_learning, author: sk1712)
def fit(self, train_data, train_labels, val_data, val_labels):
        t_process, t_wall = time.process_time(), time.time()
        sess = tf.Session(graph=self.graph)
        shutil.rmtree(self._get_path('summaries'), ignore_errors=True)
        writer = tf.summary.FileWriter(self._get_path('summaries'), self.graph)
        shutil.rmtree(self._get_path('checkpoints'), ignore_errors=True)
        os.makedirs(self._get_path('checkpoints'))
        path = os.path.join(self._get_path('checkpoints'), 'model')
        sess.run(self.op_init)

        # Training.
        accuracies = []
        losses = []
        indices = collections.deque()
        num_steps = int(self.num_epochs * train_data.shape[0] / self.batch_size)
        for step in range(1, num_steps+1):

            # Be sure to have used all the samples before using one a second time.
            if len(indices) < self.batch_size:
                indices.extend(np.random.permutation(train_data.shape[0]))
            idx = [indices.popleft() for i in range(self.batch_size)]

            batch_data, batch_labels = train_data[idx, :, :, :], train_labels[idx]
            if type(batch_data) is not np.ndarray:
                batch_data = batch_data.toarray()  # convert sparse matrices
            feed_dict = {self.ph_data: batch_data, self.ph_labels: batch_labels, self.ph_dropout: self.dropout}
            learning_rate, loss_average = sess.run([self.op_train, self.op_loss_average], feed_dict)

            # Periodical evaluation of the model.
            if step % self.eval_frequency == 0 or step == num_steps:
                epoch = step * self.batch_size / train_data.shape[0]
                print('step {} / {} (epoch {:.2f} / {}):'.format(step, num_steps, epoch, self.num_epochs))
                print('  learning_rate = {:.2e}, loss_average = {:.2e}'.format(learning_rate, loss_average))

                string, auc, loss, scores_summary = self.evaluate(train_data, train_labels, sess)
                print('  training {}'.format(string))

                string, auc, loss, scores_summary = self.evaluate(val_data, val_labels, sess)
                print('  validation {}'.format(string))
                print('  time: {:.0f}s (wall {:.0f}s)'.format(time.process_time()-t_process, time.time()-t_wall))

                accuracies.append(auc)
                losses.append(loss)

                # Summaries for TensorBoard.
                summary = tf.Summary()
                summary.ParseFromString(sess.run(self.op_summary, feed_dict))
                summary.value.add(tag='validation/auc', simple_value=auc)
                summary.value.add(tag='validation/loss', simple_value=loss)
                writer.add_summary(summary, step)

                # Save model parameters (for evaluation).
                self.op_saver.save(sess, path, global_step=step)

        print('validation accuracy: peak = {:.2f}, mean = {:.2f}'.format(max(accuracies), np.mean(accuracies[-10:])))
        writer.close()
        sess.close()

        t_step = (time.time() - t_wall) / num_steps
        return accuracies, losses, t_step, scores_summary

