python类proto()的实例源码

mean_image.py 文件源码 项目:nimo 作者: wolfram2012 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def protoBlobFileToND(proto_file):
    """Load a Caffe mean-image BlobProto file and return it as an MXNet NDArray.

    Args:
        proto_file: path to a binary BlobProto file (e.g. ``mean.binaryproto``).

    Returns:
        ``mx.nd.array`` of shape (channels, height, width), channels in RGB order.
    """
    # BlobProto files are binary: open in 'rb' so Python 3 does not try to
    # decode them as text. 'with' guarantees the handle is closed, and open()
    # raises on failure, so no truthiness check on the handle is needed.
    with open(proto_file, "rb") as f:
        data = f.read()

    if caffe_flag:
        mean_blob = caffe.proto.caffe_pb2.BlobProto()
    else:
        mean_blob = caffe_parse.caffe_pb2.BlobProto()

    mean_blob.ParseFromString(data)
    img_mean_np = np.array(mean_blob.data)
    img_mean_np = img_mean_np.reshape(
        mean_blob.channels, mean_blob.height, mean_blob.width
    )
    # Swap channels from Caffe BGR to RGB.
    # BUG FIX: the original aliased the array (img_mean_np2 = img_mean_np), so
    # the second assignment read the already-overwritten channel 0 and both
    # channels ended up equal to the original channel 2. Fancy indexing makes
    # a copy of the right-hand side, giving a true swap.
    img_mean_np[[0, 2]] = img_mean_np[[2, 0]]
    return mx.nd.array(img_mean_np)
mean_image.py 文件源码 项目:mxnet-ssd 作者: zhreshold 项目源码 文件源码 阅读 104 收藏 0 点赞 0 评论 0
def protoBlobFileToND(proto_file):
    """Load a Caffe mean-image BlobProto file and return it as an MXNet NDArray.

    Args:
        proto_file: path to a binary BlobProto file (e.g. ``mean.binaryproto``).

    Returns:
        ``mx.nd.array`` of shape (channels, height, width), channels in RGB order.
    """
    # BlobProto files are binary: open in 'rb' so Python 3 does not try to
    # decode them as text. 'with' guarantees the handle is closed, and open()
    # raises on failure, so no truthiness check on the handle is needed.
    with open(proto_file, "rb") as f:
        data = f.read()

    if caffe_flag:
        mean_blob = caffe.proto.caffe_pb2.BlobProto()
    else:
        mean_blob = caffe_parse.caffe_pb2.BlobProto()

    mean_blob.ParseFromString(data)
    img_mean_np = np.array(mean_blob.data)
    img_mean_np = img_mean_np.reshape(
        mean_blob.channels, mean_blob.height, mean_blob.width
    )
    # Swap channels from Caffe BGR to RGB.
    # BUG FIX: the original aliased the array (img_mean_np2 = img_mean_np), so
    # the second assignment read the already-overwritten channel 0 and both
    # channels ended up equal to the original channel 2. Fancy indexing makes
    # a copy of the right-hand side, giving a true swap.
    img_mean_np[[0, 2]] = img_mean_np[[2, 0]]
    return mx.nd.array(img_mean_np)
model_def_hard.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def LeNet(lmdb, batch_size):
  """Define LeNet as a Caffe NetSpec and return the generated NetParameter.

  lmdb: path of the LMDB data source; batch_size: images per mini-batch.
  """
  net = caffe.NetSpec()

  # Data layer; pixel values are scaled into [0, 1].
  net.data, net.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB,
                               source=lmdb,
                               transform_param=dict(scale=1. / 255), ntop=2)

  # Two conv/pool stages followed by a fully connected classifier.
  net.conv1 = L.Convolution(net.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
  net.pool1 = L.Pooling(net.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  net.conv2 = L.Convolution(net.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
  net.pool2 = L.Pooling(net.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  net.fc1 = L.InnerProduct(net.pool2, num_output=500,
                           weight_filler=dict(type='xavier'))
  net.relu1 = L.ReLU(net.fc1, in_place=True)
  net.score = L.InnerProduct(net.relu1, num_output=10,
                             weight_filler=dict(type='xavier'))
  net.loss = L.SoftmaxWithLoss(net.score, net.label)

  proto = net.to_proto()
  proto.name = 'LeNet'
  return proto
model_def.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def LeNet(lmdb, batch_size):
  """Define LeNet as a Caffe NetSpec and return the generated NetParameter.

  lmdb: path of the LMDB data source; batch_size: images per mini-batch.
  """
  net = caffe.NetSpec()

  # Data layer; pixel values are scaled into [0, 1].
  net.data, net.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB,
                               source=lmdb,
                               transform_param=dict(scale=1. / 255), ntop=2)

  # Two conv/pool stages followed by a fully connected classifier.
  net.conv1 = L.Convolution(net.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
  net.pool1 = L.Pooling(net.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  net.conv2 = L.Convolution(net.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
  net.pool2 = L.Pooling(net.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  net.fc1 = L.InnerProduct(net.pool2, num_output=500,
                           weight_filler=dict(type='xavier'))
  net.relu1 = L.ReLU(net.fc1, in_place=True)
  net.score = L.InnerProduct(net.relu1, num_output=10,
                             weight_filler=dict(type='xavier'))
  net.loss = L.SoftmaxWithLoss(net.score, net.label)

  proto = net.to_proto()
  proto.name = 'LeNet'
  return proto
model_def2.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def LeNet(lmdb, batch_size):
  """Define LeNet as a Caffe NetSpec and return the generated NetParameter.

  lmdb: path of the LMDB data source; batch_size: images per mini-batch.
  """
  net = caffe.NetSpec()

  # Data layer; pixel values are scaled into [0, 1].
  net.data, net.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB,
                               source=lmdb,
                               transform_param=dict(scale=1. / 255), ntop=2)

  # Two conv/pool stages followed by a fully connected classifier.
  net.conv1 = L.Convolution(net.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
  net.pool1 = L.Pooling(net.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  net.conv2 = L.Convolution(net.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
  net.pool2 = L.Pooling(net.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  net.fc1 = L.InnerProduct(net.pool2, num_output=500,
                           weight_filler=dict(type='xavier'))
  net.relu1 = L.ReLU(net.fc1, in_place=True)
  net.score = L.InnerProduct(net.relu1, num_output=10,
                             weight_filler=dict(type='xavier'))
  net.loss = L.SoftmaxWithLoss(net.score, net.label)

  proto = net.to_proto()
  proto.name = 'LeNet'
  return proto
mean_image.py 文件源码 项目:additions_mxnet 作者: eldercrow 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def protoBlobFileToND(proto_file):
    """Load a Caffe mean-image BlobProto file and return it as an MXNet NDArray.

    Args:
        proto_file: path to a binary BlobProto file (e.g. ``mean.binaryproto``).

    Returns:
        ``mx.nd.array`` of shape (channels, height, width), channels in RGB order.
    """
    # BlobProto files are binary: open in 'rb' so Python 3 does not try to
    # decode them as text. 'with' guarantees the handle is closed, and open()
    # raises on failure, so no truthiness check on the handle is needed.
    with open(proto_file, "rb") as f:
        data = f.read()

    if caffe_flag:
        mean_blob = caffe.proto.caffe_pb2.BlobProto()
    else:
        mean_blob = caffe_parse.caffe_pb2.BlobProto()

    mean_blob.ParseFromString(data)
    img_mean_np = np.array(mean_blob.data)
    img_mean_np = img_mean_np.reshape(
        mean_blob.channels, mean_blob.height, mean_blob.width
    )
    # Swap channels from Caffe BGR to RGB.
    # BUG FIX: the original aliased the array (img_mean_np2 = img_mean_np), so
    # the second assignment read the already-overwritten channel 0 and both
    # channels ended up equal to the original channel 2. Fancy indexing makes
    # a copy of the right-hand side, giving a true swap.
    img_mean_np[[0, 2]] = img_mean_np[[2, 0]]
    return mx.nd.array(img_mean_np)
noimprove_ip2.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
improve_ip1_new.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
improve_ip2.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
test_new.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
test_ip1_new.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
eval_model_ip1.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
test2.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y): list of image arrays and a numpy array of integer labels.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
    return images, np.array(labels)
improve_model_ip1.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
test.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
improve_ip2_new.py 文件源码 项目:CaffeSVD 作者: wkcn 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def read_db(db_name):
    """Read every Caffe Datum record from an LMDB database.

    Parameters
    ----------
    db_name : str
        Path of the LMDB directory to open.

    Returns
    -------
    tuple
        (X, y, cnts): list of image arrays, numpy array of integer
        labels, and a dict mapping label -> number of samples.
    """
    env = lmdb.open(db_name)
    cursor = env.begin().cursor()
    # One Datum message is reused as a parse buffer for every record.
    datum = caffe.proto.caffe_pb2.Datum()

    images = []
    labels = []
    counts = {}
    for _key, raw in cursor:
        datum.ParseFromString(raw)
        images.append(caffe.io.datum_to_array(datum))
        labels.append(datum.label)
        counts[datum.label] = counts.get(datum.label, 0) + 1
    return images, np.array(labels), counts
convert_symbol.py 文件源码 项目:nimo 作者: wolfram2012 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def read_proto_solver_file(file_path):
    """Parse ``file_path`` as a Caffe NetParameter and return the message.

    Delegates the actual reading/parsing to read_proto_file; only the empty
    message type differs depending on whether the real caffe package is
    available (caffe_flag) or the bundled caffe_parse fallback is used.
    """
    if caffe_flag:
        net_param = caffe.proto.caffe_pb2.NetParameter()
    else:
        net_param = caffe_parse.caffe_pb2.NetParameter()
    return read_proto_file(file_path, net_param)
convert_from_caffe.py 文件源码 项目:sesame-paste-noodle 作者: aissehust 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def getMeanImage(mean_path):
    """Convert a Caffe mean-image ``.binaryproto`` file to a ``.npy`` file.

    Args:
        mean_path: path of the binary BlobProto mean file.

    Side effects:
        Writes the (C, H, W) mean array to the path given in ``sys.argv[2]``
        (script-style usage inherited from the original snippet).
    """
    # the following code is from https://github.com/BVLC/caffe/issues/290
    blob = caffe.proto.caffe_pb2.BlobProto()
    # BUG FIX: the original opened the file inline and never closed it;
    # 'with' guarantees the handle is released.
    with open(mean_path, 'rb') as f:
        data = f.read()
    blob.ParseFromString(data)
    arr = np.array(caffe.io.blobproto_to_array(blob))
    # blobproto_to_array returns a (1, C, H, W) batch; keep the single image.
    out = arr[0]
    np.save(sys.argv[2], out)
score_model.py 文件源码 项目:score-zeroshot 作者: pedro-morgado 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def generate_solver_proto(solver_fn, model_fn, trainOpts):
    """Write a Caffe SolverParameter prototxt for ``model_fn`` to ``solver_fn``.

    Args:
        solver_fn: output path of the solver prototxt.
        model_fn: path of the network prototxt the solver will train.
        trainOpts: options object providing iters, init_lr, num_lr_decays,
            lr_decay_factor and paramReg.
    """
    from caffe.proto import caffe_pb2

    solver = caffe_pb2.SolverParameter()
    solver.net = model_fn

    # Learning-rate schedule: evenly spaced step decays, or constant.
    if trainOpts.num_lr_decays > 0:
        solver.lr_policy = 'step'
        solver.gamma = trainOpts.lr_decay_factor
        solver.stepsize = int(trainOpts.iters / (trainOpts.num_lr_decays + 1))
    else:
        solver.lr_policy = 'fixed'

    solver.base_lr = trainOpts.init_lr
    solver.max_iter = trainOpts.iters
    solver.display = 20
    solver.momentum = 0.9
    solver.weight_decay = trainOpts.paramReg

    # Two test phases: standard recognition and zero-shot evaluation,
    # 20 test iterations each, run every 100 training iterations.
    solver.test_state.add()
    solver.test_state.add()
    solver.test_state[0].stage.append('TestRecognition')
    solver.test_state[1].stage.append('TestZeroShot')
    solver.test_iter.extend([20, 20])
    solver.test_interval = 100

    # Snapshots are written next to the model prototxt.
    solver.snapshot = 5000
    solver.snapshot_prefix = os.path.splitext(model_fn)[0]

    with open(solver_fn, 'w') as f:
        f.write(str(solver))
score_model.py 文件源码 项目:score-zeroshot 作者: pedro-morgado 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def prep_for_deploy(self, batch_size, source_net=False, target_net=False, deploy_fn='deploy.proto', caffemodel_fn='score.caffemodel', gpu_id=0):
        """Generate the deploy prototxt, load it as a GPU caffe.Net, and
        attach semantics to the loaded net.

        Args:
            batch_size: batch size passed to generate_deploy_proto.
            source_net: forwarded to generate_deploy_proto — presumably selects
                the source branch of the network; TODO confirm against that method.
            target_net: forwarded to generate_deploy_proto — presumably selects
                the target branch; TODO confirm.
            deploy_fn: filename the deploy prototxt is written to and read from.
            caffemodel_fn: weights file loaded into the deploy net.
            gpu_id: CUDA device id used for inference.

        Side effects:
            Switches caffe into GPU mode process-wide and stores the loaded
            network on ``self.deploy``.
        """
        caffe.set_mode_gpu()
        caffe.set_device(gpu_id)

        # Write the deploy definition, then load it with the trained weights.
        self.generate_deploy_proto(deploy_fn, batch_size, source_net=source_net, target_net=target_net)
        self.deploy = caffe.Net(deploy_fn, caffe.TEST, weights=caffemodel_fn)

        # Attach semantics for both sides; init_cw=False, so class weights
        # are not (re)initialized here.
        self._set_semantics(self.deploy, source=False, init_cw=False)
        self._set_semantics(self.deploy, source=True, init_cw=False)
convert_symbol.py 文件源码 项目:mxnet-ssd 作者: zhreshold 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def read_proto_solver_file(file_path):
    """Parse ``file_path`` as a Caffe NetParameter and return the message.

    Delegates the actual reading/parsing to read_proto_file; only the empty
    message type differs depending on whether the real caffe package is
    available (caffe_flag) or the bundled caffe_parse fallback is used.
    """
    if caffe_flag:
        net_param = caffe.proto.caffe_pb2.NetParameter()
    else:
        net_param = caffe_parse.caffe_pb2.NetParameter()
    return read_proto_file(file_path, net_param)
solver.py 文件源码 项目:deepwater-nae 作者: h2oai 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def solver_graph(self):
        """Build and return the caffe_pb2.SolverParameter for this worker.

        Uses a fixed learning-rate policy with options taken from self.cmd,
        and either points the solver at an on-disk prototxt (self.cmd.graph)
        or embeds the train/test net definitions directly.
        """
        proto = caffe_pb2.SolverParameter()
        proto.type = self.cmd.solver_type
        # Select GPU or CPU mode depending on whether a device was assigned.
        if self.device is not None:
            proto.solver_mode = caffe_pb2.SolverParameter.SolverMode.Value(
                'GPU')
            proto.device_id = self.device
        else:
            proto.solver_mode = caffe_pb2.SolverParameter.SolverMode.Value(
                'CPU')
        proto.lr_policy = 'fixed'
        proto.base_lr = self.cmd.learning_rate
        proto.momentum = self.cmd.momentum
        proto.max_iter = int(2e9)
        # Offset the seed by rank so each distributed worker draws a
        # different random stream.
        proto.random_seed = self.cmd.random_seed + self.rank
        print('Setting seed ', proto.random_seed, file=sys.stderr)
        proto.display = 1

        # BUG FIX: the original computed an unused `batch` value from the
        # *global* name `solver` (`solver.cmd.input_shape[0] / solver.size`)
        # instead of `self`, which raises NameError when no such global
        # exists; the dead statement has been removed.

        if self.cmd.graph:
            # Reference an existing prototxt next to this script.
            script_dir = os.path.dirname(os.path.realpath(__file__))
            proto.net = script_dir + '/' + self.cmd.graph + '.prototxt'
        else:
            # Embed generated train/test net definitions in the solver.
            proto.train_net_param.MergeFrom(self.net_def(caffe.TRAIN))
            proto.test_net_param.add().MergeFrom(self.net_def(caffe.TEST))

        proto.test_iter.append(1)
        proto.test_interval = 999999999  # cannot disable or set to 0
        proto.test_initialization = False
        return proto
solver.py 文件源码 项目:deep_share 作者: luyongxi 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def __init__(self, solver_prototxt=None, path=None, base_lr=0.01, lr_policy="step", 
        gamma=0.1, stepsize=20000, momentum=0.9, weight_decay=0.0005,
        regularization_type="L2", clip_gradients=None):
        """Build a caffe SolverParameter, either parsed from an existing
        prototxt (solver_prototxt) or constructed from keyword arguments
        rooted at `path`. solver_prototxt takes precedence when both are given.

        Args:
            solver_prototxt: path of an existing solver prototxt to parse.
            path: directory used to derive solver.prototxt / train_val.prototxt
                paths when building the solver from scratch.
            base_lr, lr_policy, gamma, stepsize, momentum, weight_decay,
            regularization_type: standard Caffe solver fields (see
            caffe.proto SolverParameter).
            clip_gradients: optional gradient clipping threshold; only set
                when not None.
        """

        assert (path is not None) or (solver_prototxt is not None),\
            'Need to specify either path or solver_prototxt.'

        self._solver = caffe_pb2.SolverParameter()

        if solver_prototxt is not None:
            # Parse the text-format prototxt into the SolverParameter message.
            self._solver_prototxt = solver_prototxt
            with open(solver_prototxt, 'rt') as f:
                pb2.text_format.Merge(f.read(), self._solver)                                   
        elif path is not None:
            self._solver_prototxt = osp.join(path, 'solver.prototxt')
            # update proto object
            self._solver.net = osp.join(path, 'train_val.prototxt')
            self._solver.base_lr = base_lr
            self._solver.lr_policy = lr_policy
            self._solver.gamma = gamma
            self._solver.stepsize = stepsize
            self._solver.momentum = momentum
            self._solver.weight_decay = weight_decay
            self._solver.regularization_type = regularization_type
            # caffe solver snapshotting is disabled
            self._solver.snapshot = 0
            # shut down caffe display
            self._solver.display = 0
            # shut down caffe validation
            self._solver.test_iter.append(0)
            self._solver.test_interval = 1000
            if clip_gradients is not None:
                self._solver.clip_gradients = clip_gradients
train.py 文件源码 项目:Land_Use_CNN 作者: BUPTLdy 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def caffenet(data, label=None, train=True, num_classes=1000,
             classifier_name='fc8', learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
       specification (./models/bvlc_reference_caffenet/train_val.prototxt).

    Args:
        data: input blob/layer feeding the network.
        label: optional label blob; when given, SoftmaxWithLoss and Accuracy
            layers are appended.
        train: if True, dropout layers are inserted after fc6/fc7.
        num_classes: number of outputs of the final classifier layer.
        classifier_name: attribute name given to the final InnerProduct layer.
        learn_all: if True, all layers use learned_param; otherwise every
            layer except the classifier is frozen (frozen_param).

    Returns:
        The filename of the prototxt file the net definition was written to.
    """
    n = caffe.NetSpec()
    n.data = data
    # All layers except the classifier share this param (frozen unless learn_all).
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    # Dropout only in the training variant; at test time fc7 reads relu6 directly.
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    # NOTE(review): the output path is hard-coded to a user-specific location;
    # consider parameterizing it.
    with open('/home/ldy/workspace/caffe/models/finetune_UCMerced_LandUse/deploy.prototxt','w') as f:
        f.write(str(n.to_proto()))
        return f.name
model_def_hard.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def SketchTriplet_anchor(out_dim):
  """Build the anchor branch of SketchTriplet and return its NetParameter.

  out_dim: dimensionality of the final embedding (feat_a) layer.
  """
  net = caffe.NetSpec()
  # Single-channel 225x225 input.
  net.data_a = L.Input(name='data', shape=dict(dim=[1, 1, 225, 225]))

  # Five convolution stages; conv3-conv5 have no pooling between them.
  net.conv1_a, net.relu1_a = conv_relu_triplet_dep(net.data_a, 15, 64, stride=3)
  net.pool1_a = pooling(net.relu1_a, 3, stride=2)
  net.conv2_a, net.relu2_a = conv_relu_triplet_dep(net.pool1_a, 5, 128)
  net.pool2_a = pooling(net.relu2_a, 3, stride=2)
  net.conv3_a, net.relu3_a = conv_relu_triplet_dep(net.pool2_a, 3, 256)
  net.conv4_a, net.relu4_a = conv_relu_triplet_dep(net.relu3_a, 3, 256)
  net.conv5_a, net.relu5_a = conv_relu_triplet_dep(net.relu4_a, 3, 256)
  net.pool5_a = pooling(net.relu5_a, 3, stride=2)

  # Two 512-wide fully connected layers, then the embedding layer.
  net.fc6_a, net.relu6_a = fc_relu_triplet_dep(net.pool5_a, 512)
  net.fc7_a, net.relu7_a = fc_relu_triplet_dep(net.relu6_a, 512)
  net.feat_a = fc_triplet_dep(net.relu7_a, out_dim)

  net_proto = net.to_proto()
  net_proto.name = 'SketchTriplet'
  return net_proto
model_def_hard.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def SketchTriplet_pos(out_dim):
  """Build the positive branch of SketchTriplet and return its NetParameter.

  out_dim: dimensionality of the final embedding (feat_p) layer.
  """
  net = caffe.NetSpec()
  # Single-channel 225x225 input.
  net.data_p = L.Input(name='data', shape=dict(dim=[1, 1, 225, 225]))

  # Five convolution stages; conv3-conv5 have no pooling between them.
  net.conv1_p, net.relu1_p = conv_relu_triplet_dep(net.data_p, 15, 64, stride=3)
  net.pool1_p = pooling(net.relu1_p, 3, stride=2)
  net.conv2_p, net.relu2_p = conv_relu_triplet_dep(net.pool1_p, 5, 128)
  net.pool2_p = pooling(net.relu2_p, 3, stride=2)
  net.conv3_p, net.relu3_p = conv_relu_triplet_dep(net.pool2_p, 3, 256)
  net.conv4_p, net.relu4_p = conv_relu_triplet_dep(net.relu3_p, 3, 256)
  net.conv5_p, net.relu5_p = conv_relu_triplet_dep(net.relu4_p, 3, 256)
  net.pool5_p = pooling(net.relu5_p, 3, stride=2)

  # Two 512-wide fully connected layers, then the embedding layer.
  net.fc6_p, net.relu6_p = fc_relu_triplet_dep(net.pool5_p, 512)
  net.fc7_p, net.relu7_p = fc_relu_triplet_dep(net.relu6_p, 512)
  net.feat_p = fc_triplet_dep(net.relu7_p, out_dim)

  net_proto = net.to_proto()
  net_proto.name = 'SketchTriplet'
  return net_proto
model_def.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def SketchTriplet_anchor(out_dim):
  """Build the anchor branch of SketchTriplet and return its NetParameter.

  out_dim: dimensionality of the final embedding (feat_a) layer.
  """
  net = caffe.NetSpec()
  # Single-channel 225x225 input.
  net.data_a = L.Input(name='data', shape=dict(dim=[1, 1, 225, 225]))

  # Five convolution stages; conv3-conv5 have no pooling between them.
  net.conv1_a, net.relu1_a = conv_relu_triplet_dep(net.data_a, 15, 64, stride=3)
  net.pool1_a = pooling(net.relu1_a, 3, stride=2)
  net.conv2_a, net.relu2_a = conv_relu_triplet_dep(net.pool1_a, 5, 128)
  net.pool2_a = pooling(net.relu2_a, 3, stride=2)
  net.conv3_a, net.relu3_a = conv_relu_triplet_dep(net.pool2_a, 3, 256)
  net.conv4_a, net.relu4_a = conv_relu_triplet_dep(net.relu3_a, 3, 256)
  net.conv5_a, net.relu5_a = conv_relu_triplet_dep(net.relu4_a, 3, 256)
  net.pool5_a = pooling(net.relu5_a, 3, stride=2)

  # Two 512-wide fully connected layers, then the embedding layer.
  net.fc6_a, net.relu6_a = fc_relu_triplet_dep(net.pool5_a, 512)
  net.fc7_a, net.relu7_a = fc_relu_triplet_dep(net.relu6_a, 512)
  net.feat_a = fc_triplet_dep(net.relu7_a, out_dim)

  net_proto = net.to_proto()
  net_proto.name = 'SketchTriplet'
  return net_proto
model_def.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def SketchTriplet_pos(out_dim):
  """Build the positive branch of SketchTriplet and return its NetParameter.

  out_dim: dimensionality of the final embedding (feat_p) layer.
  """
  net = caffe.NetSpec()
  # Single-channel 225x225 input.
  net.data_p = L.Input(name='data', shape=dict(dim=[1, 1, 225, 225]))

  # Five convolution stages; conv3-conv5 have no pooling between them.
  net.conv1_p, net.relu1_p = conv_relu_triplet_dep(net.data_p, 15, 64, stride=3)
  net.pool1_p = pooling(net.relu1_p, 3, stride=2)
  net.conv2_p, net.relu2_p = conv_relu_triplet_dep(net.pool1_p, 5, 128)
  net.pool2_p = pooling(net.relu2_p, 3, stride=2)
  net.conv3_p, net.relu3_p = conv_relu_triplet_dep(net.pool2_p, 3, 256)
  net.conv4_p, net.relu4_p = conv_relu_triplet_dep(net.relu3_p, 3, 256)
  net.conv5_p, net.relu5_p = conv_relu_triplet_dep(net.relu4_p, 3, 256)
  net.pool5_p = pooling(net.relu5_p, 3, stride=2)

  # Two 512-wide fully connected layers, then the embedding layer.
  net.fc6_p, net.relu6_p = fc_relu_triplet_dep(net.pool5_p, 512)
  net.fc7_p, net.relu7_p = fc_relu_triplet_dep(net.relu6_p, 512)
  net.feat_p = fc_triplet_dep(net.relu7_p, out_dim)

  net_proto = net.to_proto()
  net_proto.name = 'SketchTriplet'
  return net_proto
model_def2.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def SketchTriplet_anchor(out_dim):
  """Build the anchor branch of SketchTriplet (with L2 normalization) and
  return its NetParameter.

  out_dim: dimensionality of the final embedding (feat_a) layer.
  """
  net = caffe.NetSpec()
  # Single-channel 225x225 input.
  net.data_a = L.Input(name='data', shape=dict(dim=[1, 1, 225, 225]))

  # Five convolution stages; conv3-conv5 have no pooling between them.
  net.conv1_a, net.relu1_a = conv_relu_triplet_dep(net.data_a, 15, 64, stride=3)
  net.pool1_a = pooling(net.relu1_a, 3, stride=2)
  net.conv2_a, net.relu2_a = conv_relu_triplet_dep(net.pool1_a, 5, 128)
  net.pool2_a = pooling(net.relu2_a, 3, stride=2)
  net.conv3_a, net.relu3_a = conv_relu_triplet_dep(net.pool2_a, 3, 256)
  net.conv4_a, net.relu4_a = conv_relu_triplet_dep(net.relu3_a, 3, 256)
  net.conv5_a, net.relu5_a = conv_relu_triplet_dep(net.relu4_a, 3, 256)
  net.pool5_a = pooling(net.relu5_a, 3, stride=2)

  # Two 512-wide fully connected layers, the embedding layer, then an
  # in-place normalization of the embedding.
  net.fc6_a, net.relu6_a = fc_relu_triplet_dep(net.pool5_a, 512)
  net.fc7_a, net.relu7_a = fc_relu_triplet_dep(net.relu6_a, 512)
  net.feat_a = fc_triplet_dep(net.relu7_a, out_dim)
  net.norm_a = L.Normalize(net.feat_a, in_place=True)

  net_proto = net.to_proto()
  net_proto.name = 'SketchTriplet'
  return net_proto
model_def2.py 文件源码 项目:Triplet_Loss_SBIR 作者: TuBui 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def siamese_anchor(out_dim=100):
  """Build the anchor branch of the siamese variant and return its
  NetParameter (no normalization on the embedding).

  out_dim: dimensionality of the final embedding (feat_a) layer.
  """
  net = caffe.NetSpec()
  # Single-channel 225x225 input.
  net.data_a = L.Input(name='data', shape=dict(dim=[1, 1, 225, 225]))

  # Five convolution stages; conv3-conv5 have no pooling between them.
  net.conv1_a, net.relu1_a = conv_relu_triplet_dep(net.data_a, 15, 64, stride=3)
  net.pool1_a = pooling(net.relu1_a, 3, stride=2)
  net.conv2_a, net.relu2_a = conv_relu_triplet_dep(net.pool1_a, 5, 128)
  net.pool2_a = pooling(net.relu2_a, 3, stride=2)
  net.conv3_a, net.relu3_a = conv_relu_triplet_dep(net.pool2_a, 3, 256)
  net.conv4_a, net.relu4_a = conv_relu_triplet_dep(net.relu3_a, 3, 256)
  net.conv5_a, net.relu5_a = conv_relu_triplet_dep(net.relu4_a, 3, 256)
  net.pool5_a = pooling(net.relu5_a, 3, stride=2)

  # Two 512-wide fully connected layers, then the embedding layer.
  net.fc6_a, net.relu6_a = fc_relu_triplet_dep(net.pool5_a, 512)
  net.fc7_a, net.relu7_a = fc_relu_triplet_dep(net.relu6_a, 512)
  net.feat_a = fc_triplet_dep(net.relu7_a, out_dim)

  net_proto = net.to_proto()
  net_proto.name = 'SketchTriplet'
  return net_proto


问题


面经


文章

微信
公众号

扫码关注公众号