Python expand_dims() example source code
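
All of the snippets below come from open-source projects and use numpy.expand_dims (or the matching Keras backend op) to insert a length-1 axis, most commonly to turn a single sample into a batch of one. As a minimal NumPy sketch of the basic behaviour (array names here are purely illustrative):

import numpy as np

x = np.random.random((4, 3))            # shape (4, 3)
x_batch = np.expand_dims(x, axis=0)     # shape (1, 4, 3): new leading batch axis
x_chan = np.expand_dims(x, axis=-1)     # shape (4, 3, 1): new trailing axis
assert x_batch.shape == (1, 4, 3) and x_chan.shape == (4, 3, 1)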

Source file: test_backends.py (project: keras, author: GeekLiB)
def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
        check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
        check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                         'squeeze', {'axis': 2},
                                         (4, 3, 1, 1))
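
The test above exercises the Keras backend version of the same operation; note that this older test passes the target axis as dim=, whereas Keras 2.x renamed the keyword to axis=. A hedged sketch assuming a Keras 2.x backend (K.variable and K.eval as used in the test above):

from keras import backend as K
import numpy as np

xval = np.random.random((4, 3))
yval = K.eval(K.expand_dims(K.variable(xval), axis=-1))
assert yval.shape == (4, 3, 1)   # same result as np.expand_dims(xval, axis=-1)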

Source file: conopt_particle_env.py (project: third_person_im, author: bstadie)
def render(self, mode='human', close=False):
        env = self.conopt_env
        if close:
            if 'viewer' in env.__dict__:
                env.viewer.close()
                del env.viewer
        else:
            img = env.world.model.render(np.expand_dims(env.x, 0))[0]
            if mode == 'human':
                if 'viewer' not in env.__dict__:
                    from gym.envs.classic_control.rendering import SimpleImageViewer
                    env.viewer = SimpleImageViewer()
                env.viewer.imshow(img)
                return img
            else:
                return img

Source file: feedforward.py (project: facerecognition, author: guoxiaolu)
def readimg(img_path):
    img = misc.imread(img_path, mode='RGB')

    img = misc.imresize(img, (160, 160))
    img = facenet.prewhiten(img)
    img = np.expand_dims(img, axis=0)

    return img

Source file: feedforward.py (project: facerecognition, author: guoxiaolu)
def get_embedding(img_path):
    img = misc.imread(img_path, mode='RGB')

    # judge alignment
    aligned = align.align(160, img, [0, 0, img.shape[1], img.shape[0]], landmarkIndices=landmarkIndices)


    img = facenet.prewhiten(img)
    img = np.expand_dims(img, axis=0)

    aligned = facenet.prewhiten(aligned)
    aligned = np.expand_dims(aligned, axis=0)


    # Run forward pass to calculate embeddings
    feed_dict = {images_placeholder: img, phase_train_placeholder: False}
    emb = sess.run(embeddings, feed_dict=feed_dict)

    # Run forward pass to calculate embeddings
    feed_dict_aligned = {images_placeholder: aligned, phase_train_placeholder: False}
    emb_aligned = sess.run(embeddings, feed_dict=feed_dict_aligned)

    return emb.ravel(), emb_aligned.ravel()

# # for test
# import os
# from time import time
# def main(dir_path):
#     img_all = os.listdir(dir_path)
#     for f in img_all:
#         start = time()
#         embedding_result = get_embedding(os.path.join(dir_path, f))
#         print time() - start
#         print embedding_result
#
# main('./data')

Source file: kshape.py (project: rca-evaluation, author: sieve-microservices)
def zscore(a, axis=0, ddof=0):
    a = np.asanyarray(a)
    mns = a.mean(axis=axis)
    sstd = a.std(axis=axis, ddof=ddof)
    if axis and mns.ndim < a.ndim:
        res = ((a - np.expand_dims(mns, axis=axis)) /
               np.expand_dims(sstd, axis=axis))
    else:
        res = (a - mns) / sstd
    return np.nan_to_num(res)
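
Here expand_dims re-inserts the axis that mean and std collapsed, so the statistics broadcast back against a. A small usage sketch (values chosen only for illustration):

import numpy as np

a = np.array([[1.0, 2.0, 3.0],
              [10.0, 20.0, 30.0]])
z = zscore(a, axis=1)                            # per-row standardization
mns = np.expand_dims(a.mean(axis=1), axis=1)     # shape (2, 1)
sstd = np.expand_dims(a.std(axis=1), axis=1)     # shape (2, 1)
assert np.allclose(z, (a - mns) / sstd)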

Source file: classifier.py (project: human-rl, author: gsastry)
def is_catastrophe(self, obs):
        X = np.expand_dims(obs, axis=0)
        X = np.reshape(X, [X.shape[0], -1])
        return self.classifier.predict(X)[0]

Source file: classifier_tf.py (project: human-rl, author: gsastry)
def load_features_episode(self, episode):
        features = {}
        observations = [frame.observation for frame in episode.frames if frame.has_action()]
        features['observation'] = np.concatenate(
            [np.expand_dims(observation, axis=0) for observation in observations], axis=0)
        images = [frame.image for frame in episode.frames if frame.has_action()]
        features['image'] = np.concatenate(
            [process_image(image, self.hparams) for image in images], axis=0)
        actions = [frame.get_proposed_action() for frame in episode.frames if frame.has_action()]
        features['action'] = np.expand_dims(np.array(actions), axis=1)
        features['index'] = np.array(
            [i for i, frame in enumerate(episode.frames) if frame.has_action()])
        return features

Source file: classifier_tf.py (project: human-rl, author: gsastry)
def load_features_incident_records(self, incident_records):
        original_images = []
        actions = []
        for block_record_file in incident_records:
            with open(block_record_file, 'rb') as f:
                block_record = pickle.load(f)
                original_images.append(block_record.obs)
                actions.append(block_record.action)
        features = {}
        features['image'] = np.concatenate(
            [process_image(image, self.hparams) for image in original_images], axis=0)
        features['action'] = np.expand_dims(np.array(actions), axis=1)
        return features, original_images

Source file: classifier_tf.py (project: human-rl, author: gsastry)
def process_image(image, hparams):
    desired_shape = hparams.image_shape
    if hparams.image_crop_region is not None:
        image = image[hparams.image_crop_region[0][0]:hparams.image_crop_region[0][1],
                      hparams.image_crop_region[1][0]:hparams.image_crop_region[1][1]]
    if tuple(image.shape) != tuple(desired_shape):
        image = cv2.resize(image, (desired_shape[1], desired_shape[0]))
    assert tuple(image.shape) == tuple(desired_shape), "{}, {}".format(image.shape, desired_shape)
    return np.expand_dims(image.astype(np.float32) / 256.0, axis=0)
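
Each call to process_image returns an array of shape (1, H, W, C), so the per-frame results can simply be concatenated along axis 0 to build a batch, as the load_features_* helpers above do. A sketch of that pattern (the 84x84 RGB shape is a hypothetical stand-in for hparams.image_shape):

import numpy as np

frames = [np.zeros((1, 84, 84, 3), dtype=np.float32) for _ in range(4)]  # hypothetical per-frame outputs
batch = np.concatenate(frames, axis=0)
assert batch.shape == (4, 84, 84, 3)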

Source file: classifier_tf.py (project: human-rl, author: gsastry)
def label(self, features, episode):
        labels = np.array(
            [self._has_label(frame) for frame in episode.frames if frame.has_action()])
        actions = [
            frame.get_action(self.action_type) for frame in episode.frames if frame.has_action()
        ]
        features['action'] = np.expand_dims(np.array(actions), axis=1)
        return features, labels

