Example source code for the Python Image() class

deepdream.py (source excerpt, project: DeepArt, author: jiriroz)
def showarray(self, a, name, fmt='jpeg'):
        """Clip the array to the 8-bit range and save it to disk as an image file."""
        a = np.uint8(np.clip(a, 0, 255))
        #f = StringIO()
        #PIL.Image.fromarray(a).save(f, fmt)
        #PIL.Image.fromarray(a).save(name + '.' + fmt, fmt)
        #display(Image(data=f.getvalue()))
        # PIL's format name is 'jpeg', but the conventional file extension is '.jpg'.
        if fmt == 'jpeg':
            outputfmt = 'jpg'
        else:
            outputfmt = fmt
        PIL.Image.fromarray(a).save(name + '.' + outputfmt, fmt)

    # a couple of utility functions for converting to and from Caffe's input image layout
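The conversion helpers referred to in the comment above are not included in this excerpt. A minimal sketch of what they typically look like in Caffe-based DeepDream code is given below; it assumes net is a caffe.Classifier whose transformer was configured with a per-channel mean, and it converts an RGB HxWx3 array into Caffe's mean-subtracted BGR 3xHxW layout and back.

import numpy as np

def preprocess(net, img):
    # RGB HxWx3 -> mean-subtracted BGR 3xHxW (Caffe's input layout).
    return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean['data']

def deprocess(net, img):
    # Inverse of preprocess: add the mean back and return an RGB HxWx3 array.
    return np.dstack((img + net.transformer.mean['data'])[::-1])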
DeepDream.py (source excerpt, project: QScode, author: PierreHao)
def showarray(a, fmt='jpeg'):
    """Display image in windows"""
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
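This snippet is written for Python 2, where the encoded JPEG bytes can be held in a StringIO buffer. Under Python 3 the same display technique needs a binary buffer; a minimal sketch (the name showarray_py3 is only illustrative) would be:

from io import BytesIO
import numpy as np
import PIL.Image
from IPython.display import Image, display

def showarray_py3(a, fmt='jpeg'):
    # Same idea under Python 3: encoded image bytes go through BytesIO, not StringIO.
    a = np.uint8(np.clip(a, 0, 255))
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))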


# Class DD: DeepDream
DeepDream.py (source excerpt, project: QScode, author: PierreHao)
def Get_guide(self):
        """Compute the guide image's feature activations at the target layer (self.end)."""
        guide = np.float32(imresize(PIL.Image.open(self.guide_path), 224))
        h, w = guide.shape[:2]
        src, dst = self.net.blobs['data'], self.net.blobs[self.end]
        # Reshape the input blob to the guide image and feed the preprocessed data.
        src.reshape(1, 3, h, w)
        src.data[0] = self.Preprocess(guide)
        # Forward pass up to the target layer and cache its activations.
        self.net.forward(end=self.end)
        self.guide_features = dst.data[0].copy()
        self.flag = 1
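Note that imresize here is presumably scipy.misc.imresize, which interpreted an integer size argument as a percentage and was removed in SciPy 1.3.0, so this call does not run on current SciPy. Whether 224 was meant as a percentage or as a 224x224 target is not clear from the excerpt; a rough replacement using PIL directly, assuming a fixed square guide image is wanted (load_guide is a hypothetical helper, not part of the original code), could look like:

import numpy as np
import PIL.Image

def load_guide(path, size=(224, 224)):
    # Hypothetical stand-in for the deprecated scipy.misc.imresize call:
    # load the guide image, force RGB, resize, and return a float32 array.
    img = PIL.Image.open(path).convert('RGB').resize(size, PIL.Image.LANCZOS)
    return np.float32(img)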
visualize.py (source excerpt, project: crayimage, author: yandexdataschool)
def draw_to_notebook(layers, **kwargs):
    """
    Draws a network diagram in an IPython notebook
    :parameters:
        - layers : list or NeuralNet instance
            List of layers or the neural net to draw.
        - **kwargs : see the docstring of make_pydot_graph for other options
    """
    from IPython.display import Image
    layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
              else layers)
    dot = make_pydot_graph(layers, **kwargs)
    return Image(dot.create_png())
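A typical call from a notebook cell, assuming net is either a NeuralNet instance or a list of Lasagne layers as the docstring describes, is simply:

# The returned IPython Image renders the network diagram inline in the notebook.
draw_to_notebook(net)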
14_DeepDream.py (source excerpt, project: LIE, author: EmbraceLife)
def resize_image(image, size=None, factor=None):
    # If a rescaling-factor is provided then use it.
    if factor is not None:
        # Scale the numpy array's shape for height and width.
        size = np.array(image.shape[0:2]) * factor

        # The size is floating-point because it was scaled.
        # PIL requires the size to be integers.
        size = size.astype(int)
    else:
        # Ensure the size has length 2.
        size = size[0:2]

    # The height and width are reversed in numpy vs. PIL.
    size = tuple(reversed(size))

    # Ensure the pixel-values are between 0 and 255.
    img = np.clip(image, 0.0, 255.0)

    # Convert the pixels to 8-bit bytes.
    img = img.astype(np.uint8)

    # Create PIL-object from numpy array.
    img = PIL.Image.fromarray(img)

    # Resize the image.
    img_resized = img.resize(size, PIL.Image.LANCZOS)

    # Convert 8-bit pixel values back to floating-point.
    img_resized = np.float32(img_resized)

    return img_resized
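Hypothetical usage, assuming image is a float32 array of shape (height, width, 3) with values in [0.0, 255.0]; either a target size (given as height, width) or a scaling factor can be passed:

# Scale both dimensions by 70%.
smaller = resize_image(image=image, factor=0.7)

# Resize to exactly 600 x 800 pixels (height, width); the function flips the
# order internally because PIL's resize expects (width, height).
fixed = resize_image(image=image, size=(600, 800))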


# ## DeepDream Algorithm

# ### Gradient

# The following helper-functions calculate the gradient of an input image for use in the DeepDream algorithm. The Inception 5h model can accept images of any size, but very large images may use many giga-bytes of RAM. In order to keep the RAM-usage low we will split the input image into smaller tiles and calculate the gradient for each of the tiles. 
# 
# However, this may result in visible lines in the final images produced by the DeepDream algorithm. We therefore choose the tiles randomly so the locations of the tiles are always different. This makes the seams between the tiles invisible in the final DeepDream image.

# This is a helper-function for determining an appropriate tile-size. The desired tile-size is e.g. 400x400 pixels, but the actual tile-size will depend on the image-dimensions.

# In[16]:
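The body of this cell (the tile-size helper itself) is missing from the excerpt. A sketch that is consistent with the description above first works out how many tiles of roughly the desired size fit along one image dimension and then divides that dimension evenly; get_tile_size and its signature are assumptions here, not the notebook's verbatim code.

import math

def get_tile_size(num_pixels, tile_size=400):
    # num_pixels: image size along one dimension; tile_size: desired tile size in pixels.
    num_tiles = max(1, int(round(num_pixels / tile_size)))  # how many tiles fit
    return int(math.ceil(num_pixels / num_tiles))           # actual, evenly dividing tile size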
notebook_tools.py (source excerpt, project: tissuelab, author: VirtualPlants)
def vtk_show_polydata(polydata, width=400, height=300, position=(0,0,-160), colormap_name='glasbey', **kwargs):
    """
    Takes a vtkPolyData instance, builds a renderer for it, and returns an IPython Image with the rendering.
    """
    from tissuelab.gui.vtkviewer.colormap_utils import colormap_from_file
    from tissuelab.gui.vtkviewer.vtk_utils import define_lookuptable, get_polydata_cell_data

    point_radius = kwargs.get('point_radius',1.0)

    if (polydata.GetNumberOfCells() == 0) and (polydata.GetNumberOfPoints() > 0):
        sphere = vtk.vtkSphereSource()
        sphere.SetRadius(point_radius)
        sphere.SetThetaResolution(12)
        sphere.SetPhiResolution(12)
        glyph = vtk.vtkGlyph3D()
        glyph.SetScaleModeToDataScalingOff()
        glyph.SetColorModeToColorByScalar()
        glyph.SetSource(sphere.GetOutput())
        glyph.SetInput(polydata)
        glyph.Update()
        polydata = glyph.GetOutput()

    # colormap = colormap_from_file("/Users/gcerutti/Developpement/openalea/oalab-tissue/tissuelab/share/data/colormaps/glasbey.lut",name="glasbey")
    colormap = load_colormaps()[colormap_name]

    irange = kwargs.get('intensity_range', None)

    cell_data = get_polydata_cell_data(polydata)
    lut = define_lookuptable(cell_data,colormap_points=colormap._color_points,colormap_name=colormap.name,intensity_range=irange)

    VtkMapper = vtk.vtkPolyDataMapper()
    VtkMapper.SetInputConnection(polydata.GetProducerPort())
    VtkMapper.SetLookupTable(lut)

    VtkActor = vtk.vtkActor()
    VtkActor.SetMapper(VtkMapper)

    VtkRenderer = vtk.vtkRenderer()
    VtkRenderer.SetBackground(1.0, 1.0, 1.0)
    VtkRenderer.AddActor(VtkActor)

    VtkRenderer.GetActiveCamera().SetPosition(*position)

    return vtk_show(VtkRenderer, width=width, height=height)

