score_model.py file source

Project: score-zeroshot · Author: pedro-morgado · Language: Python

The _score_proto method below builds the scoring head of the SCoRe zero-shot model as a Caffe NetSpec: a semantic projection layer (SCoRe/sem/fc1), an optional codeword layer (SCoRe/sem/fc2), and object-score layers (SCoRe/obj/fc) for the source (training) classes and the zero-shot target classes.
def _score_proto(self, xFeat, source_net=False, target_net=False, mult=1., deploy=False):
    # Relies on module-level names from score_model.py: np (numpy), caffe,
    # L (caffe.layers), FC_W_INIT (weight-filler spec) and ATTRIBUTES
    # (semantic-type constant).
    from caffe.proto import caffe_pb2
    ns = self.netspec
    w_params = {'lr_mult': mult, 'decay_mult': mult}

    # Compute semantic space
    name = 'SCoRe/sem/fc1'
    layer_params = dict(weight_filler=FC_W_INIT, param=[w_params]) if not deploy else {}
    x = ns[name] = L.InnerProduct(xFeat, name=name, num_output=sum(self.code_dim), bias_term=False, **layer_params)

    # Note: In the case of completely binary semantics (Attributes), the two layers codewords+selector are compressed into 'SCoRe/obj/fc'.
    # Otherwise, semantic state scores are first computed in SCoRe/sem/fc2 and then grouped into class scores using a selector in SCoRe/obj/fc.
    # The selector is always kept fixed, and the codewords are learned whenever code_coeff < inf.
    xSem = 'SCoRe/sem/fc1' if self.semantics == ATTRIBUTES else 'SCoRe/sem/fc2'
    xObj = 'SCoRe/obj/fc'
    lCW = xObj + '/params' if self.semantics == ATTRIBUTES else xSem + '/params'
    if self.semantics != ATTRIBUTES:
        w_params = {'name': xSem + '/params',
                    'share_mode': caffe_pb2.ParamSpec.STRICT,
                    'lr_mult': mult if self.code_coeff < np.inf else 0.0,       # Lock weights if code_coeff is inf
                    'decay_mult': mult if self.code_coeff < np.inf else 0.0}
        layer_params = dict(weight_filler=FC_W_INIT, param=[w_params]) if not deploy else {}
        ns[xSem] = L.InnerProduct(x, name=xSem, num_output=sum(self.num_states), bias_term=False, **layer_params)

    # Compute object scores
    if source_net:
        w_params = {'name': xObj + '/params',
                    'share_mode': caffe_pb2.ParamSpec.STRICT,
                    'lr_mult': mult if self.code_coeff < np.inf and self.semantics == ATTRIBUTES else 0.0,     # If Attributes, the codewords are used in this layer
                    'decay_mult': mult if self.code_coeff < np.inf and self.semantics == ATTRIBUTES else 0.0}  # Lock weights if code_coeff is inf
        layer_params = dict(weight_filler=FC_W_INIT, param=[w_params],
                            include=dict(not_stage='TestZeroShot')) if not deploy else {}
        ns[xObj] = L.InnerProduct(ns[xSem], name=xObj, num_output=len(self.train_classes), bias_term=False, **layer_params)

    if target_net:
        name = xObj + '_target'
        w_params = {'name': name + '/params', 'share_mode': caffe_pb2.ParamSpec.STRICT,
                    'lr_mult': 0.0, 'decay_mult': 0.0}
        layer_params = dict(weight_filler=FC_W_INIT, param=[w_params],
                            include=dict(phase=caffe.TEST, stage='TestZeroShot')) if not deploy else {}

        # NetSpec cannot handle two layers with the same top blob defined for different phases/stages.
        # Workaround: set in_place=True with no inputs, then define the bottom and top fields manually.
        ns[name] = L.InnerProduct(name=name, bottom=[xSem], ntop=1, top=[xObj], in_place=True,
                                  num_output=len(self.test_classes), bias_term=False, **layer_params)
    return xObj, xSem, lCW
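
The 'name' and 'share_mode' entries in the param dicts use Caffe's named-parameter mechanism: layers in a network definition that declare the same parameter name are backed by a single weight blob, and setting lr_mult and decay_mult to 0 freezes it, which is how the code above locks the selector and the target-net weights. Below is a minimal standalone sketch of that mechanism; the layer and parameter names are made up and are not taken from the repository.

import caffe
from caffe import layers as L

ns = caffe.NetSpec()
ns.data = L.Input(input_param=dict(shape=dict(dim=[1, 512])))

# Both layers declare the same parameter name, so Caffe backs them with a
# single weight blob; the second copy is frozen via lr_mult/decay_mult = 0.
shared_w = dict(name='demo/params', lr_mult=1.0, decay_mult=1.0)
frozen_w = dict(name='demo/params', lr_mult=0.0, decay_mult=0.0)
ns.fc_a = L.InnerProduct(ns.data, num_output=10, bias_term=False,
                         weight_filler=dict(type='gaussian', std=0.01),
                         param=[shared_w])
ns.fc_b = L.InnerProduct(ns.data, num_output=10, bias_term=False,
                         param=[frozen_w])

print(ns.to_proto())  # both layers reference the blob named 'demo/params'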
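The in_place workaround in the target branch is also worth isolating. NetSpec will not auto-generate two layers that write the same top blob under different phases/stages, so the second layer suppresses automatic top naming with in_place=True and fills in the bottom and top fields by hand, exactly as 'SCoRe/obj/fc_target' does above. A minimal sketch of the same pattern, with hypothetical layer names:

import caffe
from caffe import layers as L

ns = caffe.NetSpec()
ns.feat = L.Input(input_param=dict(shape=dict(dim=[1, 64])))

# Normal layer: writes the top 'score' during training.
ns.score = L.InnerProduct(ns.feat, num_output=5, bias_term=False,
                          include=dict(phase=caffe.TRAIN))

# Second layer writing the same top for the test phase: declare bottom/top
# manually and set in_place=True so NetSpec does not invent a new top name.
ns.score_test = L.InnerProduct(name='score_test', bottom=['feat'], ntop=1,
                               top=['score'], in_place=True,
                               num_output=5, bias_term=False,
                               include=dict(phase=caffe.TEST))

print(ns.to_proto())

Printing the proto shows both layers emitting the top 'score', each guarded by its own include rule, which is what the phase/stage rules in _score_proto rely on.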