utils_tf.py source code

python

Project: FeatureSqueezing    Author: QData
import math

import numpy as np
import keras
import tensorflow as tf
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS  # FLAGS.batch_size is expected to be registered elsewhere


def tf_model_eval_distance_dual_input(sess, x, model, X_test1, X_test2):
    """
    Compute the L1 distance between the model's predictions on original and
    squeezed inputs.
    :param sess: TF session to use when evaluating the graph
    :param x: input placeholder
    :param model: model output predictions (symbolic tensor built on `x`)
    :param X_test1: numpy array with the original test inputs
    :param X_test2: numpy array with the squeezed test inputs
    :return: numpy array of per-example L1 distances
    """
    # Symbolic definitions kept for reference; the L1 distance below is
    # computed in numpy instead.
    # acc_value = keras.metrics.categorical_accuracy(y, model)

    # l2_diff = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(model1, model2)),
    #                                 axis=1))
    # l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    # l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)

    l1_dist_vec = np.zeros((len(X_test1)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test1)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test1)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test1), start + FLAGS.batch_size)
            cur_batch_size = end - start

            pred_1 = model.eval(
                feed_dict={x: X_test1[start:end],
                           keras.backend.learning_phase(): 0})
            pred_2 = model.eval(
                feed_dict={x: X_test2[start:end],
                           keras.backend.learning_phase(): 0})

            l1_dist_vec[start:end] = np.sum(np.abs(pred_1 - pred_2), axis=1)
        assert end >= len(X_test1)

    return l1_dist_vec
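
For context, here is a minimal usage sketch of how this helper might be called. The input shape, the small stand-in Keras model, the random test data, the 3-bit depth squeezing step, and the batch_size flag registration below are all illustrative assumptions, not part of the original utils_tf.py.

import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten
from tensorflow.python.platform import flags

# The helper reads FLAGS.batch_size, so the flag has to be registered first.
flags.DEFINE_integer('batch_size', 128, 'Evaluation batch size')

sess = tf.Session()
keras.backend.set_session(sess)

# Placeholder and a small stand-in Keras model (both illustrative).
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
net = Sequential([Flatten(input_shape=(28, 28, 1)),
                  Dense(10, activation='softmax')])
model = net(x)  # symbolic softmax predictions on the placeholder

# Original inputs and a bit-depth-squeezed copy (3-bit squeezing as an example).
X_test1 = np.random.rand(256, 28, 28, 1).astype('float32')
X_test2 = np.round(X_test1 * 7.0) / 7.0

sess.run(tf.global_variables_initializer())
l1_dist = tf_model_eval_distance_dual_input(sess, x, model, X_test1, X_test2)
print("Mean L1 distance between predictions:", l1_dist.mean())

With an untrained stand-in model the distances are not meaningful; in the feature-squeezing setting the same call would be made with the trained model and the real test set to measure how much squeezing perturbs the predictions.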