doc2veckeras.py source code

python

Project: word2vec-keras-in-gensim    Author: niitsuma
# Requires the legacy Keras Graph API (Keras < 1.0) and NumPy.
import numpy as np
from keras.models import Graph
from keras.layers.core import Flatten, Activation
from keras.layers.embeddings import Embedding


def build_keras_model_dm_concat(index_size, vector_size, vocab_size,
                                #code_dim,
                                context_size,
                                window_size,
                                learn_doctags=True, learn_words=True, learn_hidden=True,
                                model=None,
                                word_vectors=None, doctag_vectors=None, hidden_vectors=None
                                ):
    """
    >>> syn0=np.array([[1,-2],[-1,2],[2,-2]],'float32')
    >>> word_vectors=syn0
    >>> syn1=np.array([[-1,2,1,-5,4,1,-2,3,-4,5],[3,4,-4,1,-2,6,-7,8,9,1],[5,-6,-8,7,6,-1,2,-3,4,5]],'float32')
    >>> hidden_vectors=syn1
    >>> doctag_vectors=np.array([[-1.1,2.2],[-3.2,-4.3],[-1.1,-1.4]],'float32')
    >>> kerasmodel=build_keras_model_dm_concat(index_size=3,vector_size=2,vocab_size=3,context_size=3,window_size=2,word_vectors=word_vectors,doctag_vectors=doctag_vectors,hidden_vectors=hidden_vectors)
    >>> ind=[[0],[1]]
    >>> iwd=[[0,0,1,2],[1,1,2,0]]
    >>> ipt=[[0],[1]]
    >>> tmp1=kerasmodel.predict({'index':np.array(ind),'iword':np.array(iwd),'point':np.array(ipt)})['code']
    >>> tmp2=np.array([[np.vstack((doctag_vectors[ind[i]],word_vectors[iwd[i]])).flatten().dot(hidden_vectors[j]) for j in ipt[i] ] for i in range(2)])
    >>> np.linalg.norm(1/(1+np.exp(-tmp2))-tmp1) < 0.001
    True
    """

    # Legacy Keras Graph model with named inputs, nodes and outputs.
    kerasmodel = Graph()
    kerasmodel.add_input(name='iword', input_shape=(1,), dtype=int)
    kerasmodel.add_input(name='index', input_shape=(1,), dtype=int)
    # Context-word embedding (2*window_size words per example), optionally initialised from pre-trained vectors (syn0).
    if word_vectors is None:
        kerasmodel.add_node(Embedding(vocab_size, vector_size, input_length=2*window_size, trainable=learn_words), name='embedword', input='iword')
    else:
        kerasmodel.add_node(Embedding(vocab_size, vector_size, input_length=2*window_size, trainable=learn_words, weights=[word_vectors]), name='embedword', input='iword')
    # Doc-tag embedding (one tag per example), optionally initialised from pre-trained doctag vectors.
    if doctag_vectors is None:
        kerasmodel.add_node(Embedding(index_size, vector_size, input_length=1, trainable=learn_doctags), name='embedindex', input='index')
    else:
        kerasmodel.add_node(Embedding(index_size, vector_size, input_length=1, trainable=learn_doctags, weights=[doctag_vectors]), name='embedindex', input='index')

    # 'point' selects one row of the hidden/output weights (syn1); each row has (2*window_size+1)*vector_size entries.
    kerasmodel.add_input(name='point', input_shape=(1,), dtype=int)
    if hidden_vectors is None:
        kerasmodel.add_node(Embedding(context_size, (2*window_size+1)*vector_size, input_length=1, trainable=learn_hidden), name='embedpoint', input='point')
    else:
        kerasmodel.add_node(Embedding(context_size, (2*window_size+1)*vector_size, input_length=1, trainable=learn_hidden, weights=[hidden_vectors]), name='embedpoint', input='point')

    # Concatenate the doc-tag vector with the context-word vectors and flatten,
    # then dot the result with the selected hidden vector and apply a sigmoid.
    kerasmodel.add_node(Flatten(), name='merge', inputs=['embedindex', 'embedword'], merge_mode='concat', concat_axis=1)
    kerasmodel.add_node(Activation('sigmoid'), name='sigmoid', inputs=['merge', 'embedpoint'], merge_mode='dot', dot_axes=-1)
    kerasmodel.add_output(name='code', input='sigmoid')
    kerasmodel.compile('rmsprop', {'code': 'mse'})
    return kerasmodel
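
A minimal usage sketch mirroring the doctest above, using the same toy weight matrices; it assumes the legacy Keras Graph API (Keras < 1.0) and NumPy are available.

import numpy as np

# Toy pre-trained weights, identical to the doctest: word vectors (syn0),
# hidden/output vectors (syn1) and doc-tag vectors.
word_vectors = np.array([[1, -2], [-1, 2], [2, -2]], 'float32')
hidden_vectors = np.array([[-1, 2, 1, -5, 4, 1, -2, 3, -4, 5],
                           [3, 4, -4, 1, -2, 6, -7, 8, 9, 1],
                           [5, -6, -8, 7, 6, -1, 2, -3, 4, 5]], 'float32')
doctag_vectors = np.array([[-1.1, 2.2], [-3.2, -4.3], [-1.1, -1.4]], 'float32')

kerasmodel = build_keras_model_dm_concat(index_size=3, vector_size=2, vocab_size=3,
                                         context_size=3, window_size=2,
                                         word_vectors=word_vectors,
                                         doctag_vectors=doctag_vectors,
                                         hidden_vectors=hidden_vectors)

# Each sample: one doc-tag index, 2*window_size context-word indices, one 'point' index.
index = np.array([[0], [1]])
iword = np.array([[0, 0, 1, 2], [1, 1, 2, 0]])
point = np.array([[0], [1]])

# Predict the sigmoid score for each (sample, point) pair, as in the doctest.
code = kerasmodel.predict({'index': index, 'iword': iword, 'point': point})['code']
print(code)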