Python cpu() usage examples (source code)
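mx.cpu() returns a CPU Context object that tells MXNet where to allocate NDArrays and run computation. The snippets collected below use it either as the default device or as a fallback when no GPU index is supplied. A minimal sketch of that pattern (assuming only that MXNet is importable as mx; the use_gpu flag is a hypothetical stand-in for the args.gpu check used in the snippets below):

import mxnet as mx

# choose a device: GPU 0 if requested, otherwise fall back to the CPU context
use_gpu = False
ctx = mx.gpu(0) if use_gpu else mx.cpu()

# allocate an NDArray on the chosen device and run a small computation there
x = mx.nd.ones((2, 3), ctx=ctx)
y = (x * 2).asnumpy()
print(ctx)  # cpu(0)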

Solver.py (project: Vehicle_ReID, author: starimpact)
def __init__(self, prefix='', symbol=None, ctx=None, data_shape=None, label_shape=None,
               num_epoch=None, falsebigbatch=1, opt_method='sgd', **kwargs):
    self.prefix = prefix
    self.symbol = symbol
    self.ctx = ctx
    if self.ctx is None:
        self.ctx = mx.cpu()
    self.data_shape = data_shape
    self.label_shape = label_shape
    self.batchsize = data_shape[0]
    self.num_epoch = num_epoch
    self.update_params = None
    self.arg_params = None
    self.aux_params = None
    self.grad_params = None
    self.executor = None
    self.opt_method = opt_method
    self.optimizer = None
    self.falsebigbatch = falsebigbatch
    print 'false big batch size:%d*%d=%d'%(falsebigbatch, self.batchsize, falsebigbatch * self.batchsize)
    self.bigbatch_grads = None
    self.updater = None
    self.kwargs = kwargs.copy()
    self.initializer=mx.init.Xavier()
Module_Combine.py (project: Vehicle_ReID, author: starimpact)
def __init__(self, name, symbol, 
               data_names=('data',), 
               data_shapes=(),
               label_names=('label',),
               label_shapes=(),
               inputs_need_grad=False,
               optimizer='sgd',
               optimizer_params={'learning_rate':0.1, 'momentum':0.9, 'wd':0.0005},
               initializer=mx.init.Normal(),
               context=mx.cpu()):
    self.name = name
    self.symbol = symbol
    self.data_names = data_names
    self.label_names = label_names
    self.data_shapes = data_shapes
    self.label_shapes = label_shapes
    self.inputs_need_grad = inputs_need_grad
    self.optimizer = optimizer
    self.optimizer_params = optimizer_params
    self.initializer = initializer
    self.context = context
solver.py (project: mxnet-deeplab, author: buptweixin)
def __init__(self, symbol, ctx=None,
                begin_epoch=0, num_epoch=None,
                arg_params=None, aux_params=None,
                optimizer='sgd', **kwargs):
        # network symbol to be trained
        self.symbol = symbol
        if ctx is None:
            ctx = mx.cpu()
        self.ctx = ctx
        # starting epoch (0 unless resuming from a checkpoint)
        self.begin_epoch = begin_epoch
        # total number of training epochs
        self.num_epoch = num_epoch
        self.arg_params = arg_params
        self.aux_params = aux_params
        # optimizer name; extra optimizer settings are passed via kwargs
        self.optimizer = optimizer
        self.kwargs = kwargs.copy()
mnist.py (project: mx-lsoftmax, author: luoyetx)
def train():
    ctx = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu()
    train = mx.io.MNISTIter(
                image='data/train-images-idx3-ubyte',
                label='data/train-labels-idx1-ubyte',
                input_shape=(1, 28, 28),
                mean_r=128,
                scale=1./128,
                batch_size=args.batch_size,
                shuffle=True)
    val = mx.io.MNISTIter(
                image='data/t10k-images-idx3-ubyte',
                label='data/t10k-labels-idx1-ubyte',
                input_shape=(1, 28, 28),
                mean_r=128,
                scale=1./128,
                batch_size=args.batch_size)
    symbol = get_symbol()
    mod = mx.mod.Module(
            symbol=symbol,
            context=ctx,
            data_names=('data',),
            label_names=('softmax_label',))
    num_examples = 60000
    epoch_size = int(num_examples / args.batch_size)
    optim_params = {
        'learning_rate': args.lr,
        'momentum': 0.9,
        'wd': 0.0005,
        'lr_scheduler': mx.lr_scheduler.FactorScheduler(step=10*epoch_size, factor=0.1),
    }
    mod.fit(train_data=train,
            eval_data=val,
            eval_metric=mx.metric.Accuracy(),
            initializer=mx.init.Xavier(),
            optimizer='sgd',
            optimizer_params=optim_params,
            num_epoch=args.num_epoch,
            batch_end_callback=mx.callback.Speedometer(args.batch_size, 50),
            epoch_end_callback=mx.callback.do_checkpoint(args.model_prefix))
mnist.py (project: mx-lsoftmax, author: luoyetx)
def profile():
    ctx = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu()
    val = mx.io.MNISTIter(
            image='data/t10k-images-idx3-ubyte',
            label='data/t10k-labels-idx1-ubyte',
            input_shape=(1, 28, 28),
            mean_r=128,
            scale=1./128,
            batch_size=args.batch_size)
    symbol = get_symbol()
    mod = mx.mod.Module(
            symbol=symbol,
            context=ctx,
            data_names=('data',),
            label_names=('softmax_label',))
    mod.bind(data_shapes=val.provide_data, label_shapes=val.provide_label, for_training=True)
    mod.init_params(initializer=mx.init.Xavier())

    # run a while
    for nbatch, data_batch in enumerate(val):
        mod.forward_backward(data_batch)

    # profile
    mx.profiler.profiler_set_config(mode='symbolic', filename='profile.json')
    mx.profiler.profiler_set_state('run')
    val.reset()
    for nbatch, data_batch in enumerate(val):
        mod.forward_backward(data_batch)
    mx.profiler.profiler_set_state('stop')
loader.py (project: mx-rfcn, author: giorking)
def __init__(self, roidb, batch_size=2, shuffle=False, mode='train', ctx=None, work_load_list=None):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param mode: control returned info
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :return: ROIIter
        """
        super(ROIIter, self).__init__()

        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.mode = mode
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list

        self.cur = 0
        self.size = len(roidb)
        self.index = np.arange(self.size)
        self.num_classes = self.roidb[0]['gt_overlaps'].shape[1]
        self.reset()

        self.batch = None
        self.data = None
        self.label = None
        self.get_batch()
        self.data_name = ['data', 'rois']
        self.label_name = ['label', 'bbox_target', 'bbox_inside_weight', 'bbox_outside_weight']
generate.py (project: mx-rfcn, author: giorking)
def __init__(self, symbol, ctx=None,
                 arg_params=None, aux_params=None):
        self.symbol = symbol
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = mx.cpu()
        self.executor = None
        self.arg_params = arg_params
        self.aux_params = aux_params
detector.py (project: mx-rfcn, author: giorking)
def __init__(self, symbol, ctx=None,
                 arg_params=None, aux_params=None):
        self.symbol = symbol
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = mx.cpu()
        self.arg_params = arg_params
        self.aux_params = aux_params
        self.executor = None
load_model.py (project: mx-rfcn, author: giorking)
def load_param_rcnn(prefix, epoch, convert=False, ctx=None):
    """
    wrapper for load checkpoint
    :param prefix: Prefix of model name.
    :param epoch: Epoch number of model we would like to load.
    :param convert: reference model should be converted to GPU NDArray first
    :param ctx: if convert then ctx must be designated.
    :return: (arg_params, aux_params)
    """
    arg_params, aux_params = load_checkpoint(prefix, epoch)
    num_classes = 1000
    if "bbox_pred_bias" in arg_params.keys():
        num_classes = len(arg_params['bbox_pred_bias'].asnumpy()) / 4
    if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED and "bbox_pred_bias" in arg_params.keys():
        print "lode model with mean/std"
        means = np.tile(np.array(config.TRAIN.BBOX_MEANS_INV), (1, num_classes))
        stds = np.tile(np.array(config.TRAIN.BBOX_STDS_INV), (1, num_classes))
        arg_params['bbox_pred_weight'] = (arg_params['bbox_pred_weight'].T * mx.nd.array(stds)).T
        arg_params['bbox_pred_bias'] = (arg_params['bbox_pred_bias'] - mx.nd.array(np.squeeze(means))) * \
                                       mx.nd.array(np.squeeze(stds))

    if convert:
        if ctx is None:
            ctx = mx.cpu()
        arg_params = convert_context(arg_params, ctx)
        aux_params = convert_context(aux_params, ctx)
    return arg_params, aux_params, num_classes
load_model.py (project: iot-demo-mxnet-greengrass, author: aquaviter)
def __init__(self, synset_path, network_prefix, params_url=None, symbol_url=None, synset_url=None,
             context=mx.cpu(), label_names=['prob_label'], input_shapes=[('data', (1, 3, 224, 224))]):

        # Download the symbol set and network if URLs are provided
        if params_url is not None:
            print "fetching params from "+params_url
            fetched_file = urllib2.urlopen(params_url)
            with open(network_prefix+"-0000.params",'wb') as output:
                output.write(fetched_file.read())

        if symbol_url is not None:
            print "fetching symbols from "+symbol_url
            fetched_file = urllib2.urlopen(symbol_url)
            with open(network_prefix+"-symbol.json",'wb') as output:
                output.write(fetched_file.read())

        if synset_url is not None:
            print "fetching synset from "+synset_url
            fetched_file = urllib2.urlopen(synset_url)
            with open(synset_path,'wb') as output:
                output.write(fetched_file.read())

        # Load the symbols for the networks
        with open(synset_path, 'r') as f:
            self.synsets = [l.rstrip() for l in f]

        # Load the network parameters from default epoch 0
        sym, arg_params, aux_params = mx.model.load_checkpoint(network_prefix, 0)

        # Load the network into an MXNet module and bind the corresponding parameters
        self.mod = mx.mod.Module(symbol=sym, label_names=label_names, context=context)
        self.mod.bind(for_training=False, data_shapes= input_shapes)
        self.mod.set_params(arg_params, aux_params)
        self.camera = None
detector.py (project: nimo, author: wolfram2012)
def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
                 batch_size=1, ctx=None):
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = mx.cpu()
        load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
        if symbol is None:
            symbol = load_symbol
        self.mod = mx.mod.Module(symbol, label_names=None, context=self.ctx)  # use self.ctx so the CPU fallback above takes effect
        self.data_shape = data_shape
        self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])
        self.mod.set_params(args, auxs)
        self.data_shape = data_shape
        self.mean_pixels = mean_pixels
mxnet_emitter.py (project: MMdnn, author: Microsoft)
def header_code(self):
        return """import mxnet as mx
import numpy as np
import math

# MXNet CPU only supports channel-first layout; by default the model and weights are converted to channel first

def RefactorModel():
"""
HeadPoseEstimator.py (project: cnn_head_pose_estimator, author: laodar)
def __init__(self, model_prefix='./model/cpt', ctx=mx.cpu()):
        '''
        Initialize the estimator.

        Parameters:
        ----------
        model_prefix: string
            path prefix of the pretrained mxnet model
        ctx: context
            the context the CNN runs on
        '''
        self.model = mx.model.FeedForward.load(model_prefix, 1, ctx=ctx)
loader.py (project: focal-loss, author: unsky)
def __init__(self, roidb, config, batch_size=2, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: ROIIter
        """
        super(ROIIter, self).__init__()

        # save parameters as properties
        self.roidb = roidb
        self.cfg = config
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.aspect_grouping = aspect_grouping

        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)

        # decide data and label names (only for training)
        self.data_name = ['data', 'rois']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']

        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch_individual()
tester.py (project: focal-loss, author: unsky)
def __init__(self, symbol, data_names, label_names,
                 context=mx.cpu(), max_data_shapes=None,
                 provide_data=None, provide_label=None,
                 arg_params=None, aux_params=None):
        self._mod = MutableModule(symbol, data_names, label_names,
                                  context=context, max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
loader.py (project: odnl, author: lilhope)
def __init__(self, roidb, batch_size=2, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: ROIIter
        """
        super(ROIIter, self).__init__()

        # save parameters as properties
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.aspect_grouping = aspect_grouping

        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)

        # decide data and label names (only for training)
        self.data_name = ['data', 'rois']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']

        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()
tester.py (project: odnl, author: lilhope)
def __init__(self, sym_gen, cfg, data_names, label_names,
                 context=mx.cpu(), max_data_shapes=None,
                 provide_data=None, provide_label=None,
                 arg_params=None, aux_params=None):
        self._mod = MutableModule(sym_gen, cfg, data_names, label_names, is_train=False,
                                  context=context, max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
load_model.py (project: odnl, author: lilhope)
def load_param(prefix, epoch, convert=False, ctx=None,cells=None, process=False):
    """
    wrapper for load checkpoint
    :param prefix: Prefix of model name.
    :param epoch: Epoch number of model we would like to load.
    :param convert: reference model should be converted to GPU NDArray first
    :param ctx: if convert then ctx must be designated.
    :param process: model should drop any test
    :return: (arg_params, aux_params)
    """
    arg_params, aux_params = load_checkpoint(prefix, epoch)
    if cells is not None:
        if isinstance(cells, mx.rnn.BaseRNNCell):
            cells = [cells]
        for cell in cells:
            arg_params = cell.pack_weights(arg_params)
    if convert:
        if ctx is None:
            ctx = mx.cpu()
        arg_params = convert_context(arg_params, ctx)
        aux_params = convert_context(aux_params, ctx)
    if process:
        tests = [k for k in arg_params.keys() if '_test' in k]
        for test in tests:
            arg_params[test.replace('_test', '')] = arg_params.pop(test)
    return arg_params, aux_params
mxnet_model.py (project: char-rnn-text-generation, author: yxtay)
def load(cls, checkpoint_path, ctx=mx.cpu(), **kwargs):
        """
        loads model from checkpoint_path.
        """
        with open("{}.json".format(checkpoint_path)) as f:
            model_args = json.load(f)
        model = cls(**model_args, **kwargs)
        model.load_params(checkpoint_path, ctx)
        logger.info("model loaded: %s.", checkpoint_path)
        return model
test_utils.py (project: sockeye, author: awslabs)
def test_print_value():
    data = mx.sym.Variable("data")
    weights = mx.sym.Variable("weights")
    softmax_label = mx.sym.Variable("softmax_label")

    fc = mx.sym.FullyConnected(data=data, num_hidden=128, weight=weights, no_bias=True)
    out = mx.sym.SoftmaxOutput(data=fc, label=softmax_label, name="softmax")

    fc_print = mx.sym.Custom(op_type="PrintValue", data=fc, print_name="FullyConnected")
    out_print = mx.sym.SoftmaxOutput(data=fc_print, label=softmax_label, name="softmax")

    data_np = np.random.rand(1, 256)
    weights_np = np.random.rand(128, 256)
    label_np = np.random.rand(1, 128)

    executor_base = out.simple_bind(mx.cpu(), data=(1, 256), softmax_label=(1, 128), weights=(128, 256))
    executor_base.arg_dict["data"][:] = data_np
    executor_base.arg_dict["weights"][:] = weights_np
    executor_base.arg_dict["softmax_label"][:] = label_np

    executor_print = out_print.simple_bind(mx.cpu(), data=(1, 256), softmax_label=(1, 128), weights=(128, 256))
    executor_print.arg_dict["data"][:] = data_np
    executor_print.arg_dict["weights"][:] = weights_np
    executor_print.arg_dict["softmax_label"][:] = label_np

    output_base = executor_base.forward(is_train=True)[0]
    output_print = executor_print.forward(is_train=True)[0]
    assert np.isclose(output_base.asnumpy(), output_print.asnumpy()).all()

    executor_base.backward()
    executor_print.backward()
    assert np.isclose(executor_base.grad_arrays[1].asnumpy(), executor_print.grad_arrays[1].asnumpy()).all()

