def init_file_path(directory):
"""
Get the image file path array
:param directory: the directory that store images
:return: an array of image file path
"""
paths = []
    if not debug:
        print("Discarding all grayscale images now... this may take some time...")
for file_name in os.listdir(directory):
        # Skip files that are not JPEGs
file_path = '%s/%s' % (directory, file_name)
        if not file_name.endswith('.jpg') or imghdr.what(file_path) != 'jpeg':
continue
if debug:
paths.append(file_path)
else:
            # Discard grayscale images; this takes a long time when there are many images
            # TODO: maybe switch to a faster approach
img = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)
if len(img.shape) == 3 and img.shape[2] != 1:
paths.append(file_path)
return paths
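# Usage sketch (added; not from the original source). Assumes the module-level
# `debug` flag read above and a hypothetical './photos' directory of JPEGs.
debug = False
jpeg_paths = init_file_path('./photos')
print('%d usable images found' % len(jpeg_paths))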
def maskFace(self, frame_image, face):
    img1 = cv2.imread(self.__class__.mask_path, cv2.IMREAD_UNCHANGED)
    elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)
h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
mask = self.getTransPIL(cv2.warpPerspective(img1, h, (frame_image.width,frame_image.height)))
mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))
enhancer = ImageEnhance.Color(frame_image)
enhanced = enhancer.enhance(0.1)
enhancer = ImageEnhance.Brightness(enhanced)
enhanced = enhancer.enhance(1.2)
enhancer = ImageEnhance.Contrast(enhanced)
enhanced = enhancer.enhance(1.2)
frame_image.paste(enhanced, (0,0), mask)
frame_image.paste(mask_elements, (0,0), mask_elements)
def upload():
# Get the name of the uploaded file
file = request.files['file']
# Check if the file is one of the allowed types/extensions
if file and allowed_file(file.filename):
# Make the filename safe, remove unsupported chars
filename = secure_filename(file.filename)
        # Move the file from the temporary folder to
        # the upload folder we set up
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        # Redirect the user to the uploaded_file route, which
        # will basically display the uploaded file in the browser
# CV2
#img_np = cv2.imdecode(np.fromstring(file.read(), np.uint8), cv2.IMREAD_UNCHANGED) # cv2.IMREAD_COLOR in OpenCV 3.1
        img_np = cv2.imread(os.path.join(app.config['UPLOAD_FOLDER'], filename), -1)  # -1 == cv2.IMREAD_UNCHANGED
cv2.imshow("Image", img_np)
return redirect(url_for('uploaded_file',
filename=filename))
# This route expects a parameter containing the name of a file. It locates
# that file in the upload directory and serves it to the browser, so if the
# user uploads an image, that image is shown after the upload.
def get_rescaled(fname, metadata, directory, rescaled_directory):
# TODO(dek): move rescaling to its own function
rescaled_fname = fname + ".rescaled.png"
rescaled = os.path.join(rescaled_directory, rescaled_fname)
if not os.path.exists(rescaled):
print "Unable to find cached rescaled image for", fname
return None
image = cv2.imread(rescaled, cv2.IMREAD_UNCHANGED)
if image is None:
print "Failed to read image from", rescaled
return None
b_channel, g_channel, r_channel = cv2.split(image)
alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
image = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
return image
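# Note (added): the split/merge above appends a fully opaque alpha plane; an
# equivalent OpenCV one-liner that also yields BGRA with alpha = 255 is:
#     image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)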
def read_labeled_data(images_dir, labels_file):
images_data = []
    with open(labels_file, 'r') as f:
        labels_list = [int(x.strip()) for x in f.readlines()]
images_list = sorted(os.listdir(images_dir))
for im in images_list:
with open(os.path.join(
images_dir, im), 'rb') as img_stream:
file_bytes = np.asarray(
bytearray(img_stream.read()), dtype=np.uint8)
img_data_ndarray = cv2.imdecode(
file_bytes, cv2.IMREAD_UNCHANGED)
images_data.append(img_data_ndarray)
return np.asarray(images_data), \
np.asarray(labels_list)
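# Usage sketch (hypothetical paths): labels.txt holds one integer label per
# line, ordered to match the sorted image file names in the directory.
images, labels = read_labeled_data('./train_images', './labels.txt')
print(images.shape, labels.shape)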
def read_labeled_data2(images_dir):
dirs_list = os.listdir(images_dir)
images_data = []
labels_list = []
for d in dirs_list:
images_list = os.listdir(
os.path.join(images_dir, d))
for im in images_list:
with open(os.path.join(
images_dir, d, im), 'rb') as img_stream:
file_bytes = np.asarray(
bytearray(img_stream.read()), dtype=np.uint8)
img_data_ndarray = cv2.imdecode(
file_bytes, cv2.IMREAD_UNCHANGED)
images_data.append(img_data_ndarray)
labels_list.append(int(d))
return np.asarray(images_data), \
np.asarray(labels_list)
def load_img(file_path):
try:
if os.path.exists(file_path):
return cv2.imread(file_path)
elif file_path.startswith('http'):
with urlopen(file_path) as fp:
                img_bin = numpy.frombuffer(fp.read(), dtype=numpy.uint8)  # fromstring is deprecated
mime = fp.getheader('Content-Type', '')
print(mime)
if MIME_JPG_PTN.match(mime):
return cv2.imdecode(img_bin, cv2.IMREAD_UNCHANGED)
elif MIME_PNG_PTN.match(mime):
                    return cv2.imdecode(img_bin, cv2.IMREAD_UNCHANGED)
else:
sys.stderr.write('Unacceptable mime type {}.\n'.format(mime))
else:
sys.stderr.write('{} is not found.\n'.format(file_path))
except Exception as e:
sys.stderr.write('Failed to load {} by {}\n'.format(file_path, e))
return None
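# Usage sketch (hypothetical URL; assumes MIME_JPG_PTN and MIME_PNG_PTN are
# compiled regexes matching 'image/jpeg' and 'image/png' Content-Type values):
img = load_img('https://example.com/photo.jpg')
if img is not None:
    print(img.shape)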
def imread_process_cb(scale=1.0, grayscale=False):
return lambda fn: im_resize(cv2.imread(fn, cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED), scale=scale)
def dispread_process_cb(scale=1.0):
"""Scale disparity values for depth images"""
return lambda fn: im_resize(cv2.imread(fn, cv2.IMREAD_UNCHANGED), scale=scale) * scale
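# Why the trailing `* scale` (added note): disparity is measured in pixels, so
# shrinking the image by `scale` shrinks the pixel offsets between the stereo
# views by the same factor, and the values must be rescaled to stay consistent.
# Sketch (hypothetical file; assumes the module's im_resize helper):
read_disp = dispread_process_cb(scale=0.5)
disp_half = read_disp('disparity.png')  # half resolution, values halved too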
def get_img(path, mask=False):
if mask:
return cv2.imread(path, cv2.IMREAD_UNCHANGED)
else:
return cv2.imread(path)
def download_image(url):
response = requests.get(url, stream=True, timeout=5)
# TODO use grequests
# Raise exception on error
response.raise_for_status()
numpy_array = np.asarray(bytearray(response.raw.read()), dtype=np.uint8)
image = cv2.imdecode(numpy_array, cv2.IMREAD_COLOR)
# TODO: handle transparency (load using cv2.IMREAD_UNCHANGED and convert alpha layer to white?)
return image
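# A sketch for the transparency TODO above (an assumption, not the author's
# fix): decode with IMREAD_UNCHANGED and composite any alpha plane over white.
def decode_flattening_alpha(numpy_array):
    img = cv2.imdecode(numpy_array, cv2.IMREAD_UNCHANGED)
    if img is not None and img.ndim == 3 and img.shape[2] == 4:
        alpha = img[:, :, 3:4].astype(np.float32) / 255.0
        bgr = img[:, :, :3].astype(np.float32)
        img = (bgr * alpha + 255.0 * (1.0 - alpha)).astype(np.uint8)
    return img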
def load_hat(self, path): # pylint: disable=no-self-use
"""Loads the hat from a picture at path.
Args:
path: The path to load from
Returns:
The hat data.
"""
hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if hat is None:
raise ValueError('No hat image found at `{}`'.format(path))
b, g, r, a = cv2.split(hat)
return cv2.merge((r, g, b, a))
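# Usage sketch (added; `obj` stands for an instance of the class defining
# load_hat, and './assets/hat.png' is a hypothetical RGBA image):
#     hat_rgba = obj.load_hat('./assets/hat.png')
#     # channels come back in RGBA order, ready for PIL/matplotlib as-is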
def maskMouth(self, frame_image, face):
    elements = cv2.imread(self.__class__.mask_mouth_path, cv2.IMREAD_UNCHANGED)
h, status = cv2.findHomography(self.average_mouth_points, np.array(self.getMouthPoints(face)))
mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))
frame_image.paste(mask_elements, (0,0), mask_elements)
def maskFace(self, frame_image, face):
    elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)
h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))
frame_image.paste(mask_elements, (0,0), mask_elements)
def open(self, filename):
p = self.preferences
# open in 8 bit?
if p.p8bit.value():
col = 0
else:
col = cv2.IMREAD_ANYDEPTH
if p.pGrey.value() and not p.pSplitColors.value():
col = col | cv2.IMREAD_GRAYSCALE
else:
col |= cv2.IMREAD_ANYCOLOR
# OPEN
    img = cv2.imread(str(filename), col)  # alternative: cv2.IMREAD_UNCHANGED
if img is None:
raise Exception("image '%s' doesn't exist" % filename)
# crop
if p.pCrop.value():
r = (p.pCropX0.value(),
p.pCropX1.value(),
p.pCropY0.value(),
p.pCropY1.value())
img = img[r[0]:r[1], r[2]:r[3]]
# resize
if p.pResize.value():
img = cv2.resize(img, (p.pResizeX.value(), p.pResizeY.value()))
labels = None
if img.ndim == 3:
if p.pSplitColors.value():
img = np.transpose(img, axes=(2, 0, 1))
labels = ['blue', 'green', 'red']
else:
# rgb convention
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# change data type to float
img = self.toFloat(img)
return img, labels
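# Flag sketch (illustrative, not from the source): imread flags combine by
# OR-ing, as done above. IMREAD_ANYDEPTH preserves 16-bit data, IMREAD_GRAYSCALE
# forces a single channel, and IMREAD_ANYCOLOR keeps the stored color format.
flags = cv2.IMREAD_ANYDEPTH | cv2.IMREAD_GRAYSCALE
img16 = cv2.imread('scan_16bit.png', flags)  # hypothetical 16-bit PNG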
def read_resize_img(fname, target_size=None, target_height=None,
target_scale=None, gs_255=False):
'''Read an image (.png, .jpg, .dcm) and resize it to target size.
'''
if target_size is None and target_height is None:
raise Exception('One of [target_size, target_height] must not be None')
if path.splitext(fname)[1] == '.dcm':
img = dicom.read_file(fname).pixel_array
else:
if gs_255:
img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
else:
img = cv2.imread(fname, cv2.IMREAD_UNCHANGED)
if target_height is not None:
target_width = int(float(target_height)/img.shape[0]*img.shape[1])
else:
target_height, target_width = target_size
    if (target_height, target_width) != img.shape[:2]:  # compare spatial dims only; img may have a channel axis
img = cv2.resize(
img, dsize=(target_width, target_height),
interpolation=cv2.INTER_CUBIC)
img = img.astype('float32')
if target_scale is not None:
img_max = img.max() if img.max() != 0 else target_scale
img *= target_scale/img_max
return img
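# Usage sketch (hypothetical file name): resize to 256 rows while keeping the
# aspect ratio, then scale intensities so the maximum value becomes 1.0.
img = read_resize_img('scan.png', target_height=256, target_scale=1.0)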
def create_test_data(self):
im = cv2.imread(root('resources', 'color_circle.png'),
flags=cv2.IMREAD_UNCHANGED)
if self.mode == 'RGB':
im = im[:, :, :3]
elif self.mode == 'RGBA':
pass
elif self.mode == 'L':
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
else:
raise ValueError('Unknown mode: {}'.format(self.mode))
# Fine for upscaling
im = cv2.resize(im, tuple(self.size), interpolation=cv2.INTER_CUBIC)
return [im]
def get_rescaled(fname, rescaled_directory):
rescaled_fname = fname + ".rescaled.png"
rescaled = os.path.join(rescaled_directory, rescaled_fname)
image = cv2.imread(rescaled, cv2.IMREAD_UNCHANGED)
if image is None:
print "Failed to read image from", rescaled
return i, None
# hisEqulColor(image)
b_channel, g_channel, r_channel = cv2.split(image)
alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
image = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
return image
def load(path, mode=cv.IMREAD_COLOR):
    '''
    Load the image at the given path as an OpenCV array.
    :param mode: can be any of the following:
        cv.IMREAD_COLOR
        cv.IMREAD_GRAYSCALE
        cv.IMREAD_UNCHANGED (for 16-bit/32-bit images).
    '''
img = cv.imread(path, mode)
if img is None:
raise Exception("Error: Image not found in %s." % path)
return img
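# Usage sketch (hypothetical paths): pick the mode to match the source data.
gray = load('photo.jpg', cv.IMREAD_GRAYSCALE)
depth16 = load('depth.png', cv.IMREAD_UNCHANGED)  # keeps 16-bit pixel values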
def analyse_isomaps(self):
print ('analysing isomaps...')
for example in self.examples_all:
img = cv2.imread(example.images[0], cv2.IMREAD_UNCHANGED)
#blurryness_map = cv2.Laplacian(img, cv2.CV_64F)
#blurryness_map[np.logical_or(blurryness_map<-700, blurryness_map>700)]=0 #try to filter out the edges
#example.blurryness = blurryness_map.var()
example.blurryness = _get_gradient_magnitude(img)
example.coverage = _calc_isomap_coverage(img)
def isomap_playground():
    isomaps = []
    for path in isomap_paths:
        isomaps.append(cv2.imread(path, cv2.IMREAD_UNCHANGED))
old_isomap_merged = np.zeros([ISOMAP_SIZE, ISOMAP_SIZE, 4], dtype='uint8')
all_isomaps_merged = merge(isomaps)
show_isomap('all_isomaps_merged', all_isomaps_merged)
#cv2.waitKey()
#cv2.destroyAllWindows()
#exit()
for i in range(len(isomaps)):
new_isomap_merged = merge([old_isomap_merged, isomaps[i]])
#blurryness = cv2.Laplacian(isomaps[i], cv2.CV_64F).var()
blurryness_map = cv2.Laplacian(isomaps[i], cv2.CV_64F)
blurryness_map[np.logical_or(blurryness_map<-700, blurryness_map>700)]=0 #try to filter out the edges
blurryness = blurryness_map.var()
#show_isomap('laplac',cv2.Laplacian(isomaps[i], cv2.CV_8U))
#print ('max', np.max(cv2.Laplacian(isomaps[i], cv2.CV_64F)), 'min', np.min(cv2.Laplacian(isomaps[i], cv2.CV_64F)))
coverage = calc_isomap_coverage(isomaps[i])
print(isomap_paths[i]," isomap coverage:",coverage,"blur detection:",blurryness, "overall score", coverage*coverage*blurryness)
show_isomap('new isomap', isomaps[i])
show_isomap('merge', new_isomap_merged)
cv2.waitKey()
old_isomap_merged = new_isomap_merged
#cv2.imwrite('/user/HS204/m09113/Desktop/merge_test.png', isomap_merged)
#cv2.waitKey()
#cv2.destroyAllWindows()
def merge_isomaps_pg():
isomap_paths = ['/user/HS204/m09113/my_project_folder/Boris/new_isomaps/image-00058.isomap.png', '/user/HS204/m09113/my_project_folder/Boris/new_isomaps/image-00456.isomap.png']
    isomaps = []
    for path in isomap_paths:
        isomaps.append(cv2.imread(path, cv2.IMREAD_UNCHANGED))
isomap_merged = merge(isomaps)
cv2.imwrite('/user/HS204/m09113/my_project_folder/Boris/new_isomaps/merged.png', isomap_merged[:,:,:3])
def loadDepthMap(self,filename=None,objName=None,imgNum=None):
'''
    Read a Kinect depth map as stored with the LINEMOD dataset
'''
if filename is None:
filename = '{basepath}{objname}/data/depth{num}.dpt'.format(basepath=self.basepath,objname=objName,num=imgNum)
_,ext = os.path.splitext(filename)
if ext == ".dpt":
        with open(filename, 'rb') as f:  # binary mode for numpy.fromfile
h,w = numpy.fromfile(f,dtype=numpy.uint32,count=2)
#print('w {}, h {}'.format(w,h))
data = numpy.fromfile(f,dtype=numpy.uint16,count=w*h)
data = data.reshape((h,w))
data = data.astype(numpy.float32)/10. # make it cm, as everything else is in cm now
elif ext == ".png":
data = cv2.imread(filename,cv2.IMREAD_UNCHANGED).astype(numpy.float32) / 10.
else:
raise ValueError("Unkown depth image file format '{}'".format(ext))
return data
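# Layout implied by the reader above (inferred from the code, not an official
# spec): two uint32 values (h, w), then h*w uint16 depth samples in row-major
# order, stored in mm (hence the division by 10 to get cm). A hypothetical
# writer sketch for that layout:
def write_dpt(filename, depth_cm):
    data = (numpy.asarray(depth_cm, dtype=numpy.float32) * 10.).astype(numpy.uint16)
    with open(filename, 'wb') as f:
        numpy.asarray(data.shape, dtype=numpy.uint32).tofile(f)
        data.tofile(f)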
def RadonDemo(self):
origimg = cv2.imread('./SheppLogan_Phantom.tif', cv2.IMREAD_UNCHANGED)
    # shape and slice offsets must be ints for np.zeros and indexing
    longaxis = int(np.round(np.sqrt(origimg.shape[0] ** 2.0 + origimg.shape[1] ** 2.0)))
    img = np.zeros((longaxis, longaxis), origimg.dtype)
    xoffset = (longaxis - origimg.shape[0]) // 2
    yoffset = (longaxis - origimg.shape[1]) // 2
img[xoffset:xoffset + origimg.shape[0], yoffset:yoffset + origimg.shape[1]] = origimg
angleStep = 1.0
rimg = self.RadonFilter(img, angleStep)
iimg = self.IRadonFilter(rimg, origimg.shape, img.shape, angleStep)
#cv2.namedWindow('orig')
#cv2.imshow('orig', img)
#cv2.waitKey(0)
def AStarDemo(self):
img = cv2.imread("Astar.jpg", cv2.IMREAD_UNCHANGED)
    initPos = (200, 127)
    endPos = (210, 400)  # (180, 400)
img = self.AStar(img, initPos, endPos)
img[initPos] = 255
img[endPos] = 255
cv2.namedWindow("img")
cv2.imshow("img", img)
cv2.waitKey(0)
def read_data(images_dir):
images_data = []
images_list = sorted(os.listdir(images_dir))
for im in images_list:
with open(os.path.join(
images_dir, im), 'rb') as img_stream:
file_bytes = np.asarray(
bytearray(img_stream.read()), dtype=np.uint8)
img_data_ndarray = cv2.imdecode(
file_bytes, cv2.IMREAD_UNCHANGED)
images_data.append(img_data_ndarray)
return np.asarray(images_data)
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
    for i in range(num_images):
        if DEBUG:
            print('Loading:', roidb[i]['image'])
if cfg.TRAIN.FOURCHANNELS:
im = cv2.imread(roidb[i]['image'], cv2.IMREAD_UNCHANGED)
else:
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims, four_channels=cfg.TRAIN.FOURCHANNELS)
return blob, im_scales
def load():
global onehotLabels
global allImagesTrain
global allLabelsTrain
global allImagesTest
global allLabelsTest
global allImagesValidate
global allLabelsValidate
allImages = []
allLabels = []
with open("digit.out") as f:
content = f.readlines()
for line in content:
parts = line.split(",")
fileName = imagePath + parts[0]
        print(fileName)
img = cv2.resize(cv2.imread(fileName, cv2.IMREAD_UNCHANGED), (imageSize, imageSize))
# white wash image
if whiteWash:
imgMean = np.mean(img)
#std = np.sqrt(np.sum(np.square(img - imgMean)) / (32 * 32))
img = img.astype(np.float32)
img -= imgMean
#img /= std
allImages.append(img)
        allLabels.append(int(parts[1]))  # int needed for the one-hot indexing below
if debug and len(allLabels) > 1000:
break
onehotLabels = np.zeros((len(allLabels), 10))
onehotLabels[np.arange(len(allLabels)), allLabels] = 1
trainIdx = int(len(allLabels) * dataRatio[0])
testIdx = int(trainIdx + len(allLabels) * dataRatio[1])
allImagesTrain = allImages[:trainIdx]
allLabelsTrain = onehotLabels[:trainIdx]
allImagesTest = allImages[trainIdx:testIdx]
allLabelsTest = onehotLabels[trainIdx:testIdx]
allImagesValidate = allImages[testIdx:]
allLabelsValidate = onehotLabels[testIdx:]
def stamp_and_number_image(directory, rescaled_directory, map_directory, data_directory, output_directory, blah):
try:
t = (RES_Y, RES_X, 4)
i = blah['i']
fname = blah['fname']
poly = blah['poly']
image = get_rescaled(fname, rescaled_directory)
map_fname = os.path.join(map_directory, "map.%05d.png" % i)
map_pad = np.zeros(t, dtype=np.uint8)
map_ = cv2.imread(map_fname, cv2.IMREAD_UNCHANGED)
s = map_.shape
map_pad[t[0]-s[0]-40:t[0]-40, t[1]-s[1]-40:t[1]-40] = map_
image = cv2.addWeighted(map_pad, 1, image, 1, 0)
berkeley_logo_pad = np.zeros(t, dtype=np.uint8)
berkeley_logo = cv2.imread(os.path.join(data_directory, "logo_footer_berkeley.png"), cv2.IMREAD_UNCHANGED)
s = berkeley_logo.shape
berkeley_logo_pad[40:40+s[0], 1700:1700+s[1]] = berkeley_logo
image = cv2.addWeighted(berkeley_logo_pad, 1, image, 1, 0)
google_logo_pad = np.zeros(t, dtype=np.uint8)
google_logo = cv2.imread(os.path.join(data_directory, "logo_footer_google.png"), cv2.IMREAD_UNCHANGED)
s = google_logo.shape
google_logo_pad[40:40+s[0], 1800:1800+s[1]] = google_logo
image = cv2.addWeighted(google_logo_pad, 1, image, 1, 0)
megamovie_logo_pad = np.zeros(t, dtype=np.uint8)
megamovie_logo = cv2.imread(os.path.join(data_directory, "EclipseMovie_logo_crop.png"), cv2.IMREAD_UNCHANGED)
s = megamovie_logo.shape
megamovie_logo_pad[40:40+s[0], 40:40+s[1]] = megamovie_logo
image = cv2.addWeighted(megamovie_logo_pad, 1, image, 1, 0)
im = Image.new("RGBA", (image.shape[1], image.shape[0]), (0,0,0,0))
draw = ImageDraw.Draw(im)
font = ImageFont.truetype("ProductSans-Regular.ttf", 39)
txt = "Eclipse Megamovie 2017"
draw.text((140, 45), txt, (255,255,255,255), font=font)
tfmt = poly[2].strftime("%H:%M:%S")
txt = "Time at Umbral Center: %s" % tfmt
draw.text((1350, 1040), txt, (255,255,255,255), font=font)
# txt = "Frame #%d %s" % (i, fname)
# draw.text((20, 1040), txt, (255,255,255,255), font=font)
x = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)
b_channel, g_channel, r_channel = cv2.split(x)
alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
x = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
image = cv2.addWeighted( x, 1, image, 1, 0.0)
new_fname = os.path.join(output_directory, "%05d.png" % i)
cv2.imwrite(new_fname, image)
return i, fname, True
except Exception as e:
traceback.print_exc(limit=50)
return i, fname, False
def merge_sm_with_tf(isomap_lists, confidence_lists, output_list):
import tensorflow as tf
import cnn_tf_graphs
from shutil import copyfile
#zipped_input = zip(isomap_lists, confidence_lists, output_list)
#zipped_input.sort(key=lambda x: len(x[0]))
#isomap_lists, confidence_lists, output_list = zip(*zipped_input)
sorted_idx_list = sorted(range(len(isomap_lists)), key=lambda x: len(isomap_lists[x]))
#print (sorted_idx_list)
isomap_lists = [isomap_lists[i] for i in sorted_idx_list]
confidence_lists = [confidence_lists[i] for i in sorted_idx_list]
output_list = [output_list[i] for i in sorted_idx_list]
#print ('HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH')
#for i in range(len(isomap_lists)):
# print (isomap_lists[i])
# print (confidence_lists[i])
# print (output_list[i])
#isomap_lists.sort(key=len)
    merge_length = -1
sess = None
for j, isomap_list in enumerate(isomap_lists):
with tf.Graph().as_default():
if len(isomap_list) == 0:
continue
            elif len(isomap_list) == 1:
                copyfile(isomap_list[0], output_list[j])
else:
                if len(isomap_list) != merge_length:
if sess:
sess.close()
placeholders = []
outpath = tf.placeholder(tf.string)
for i in range(len(isomap_list)):
colour = tf.placeholder(tf.float32, shape=(1, ISOMAP_SIZE, ISOMAP_SIZE, 3))
conf = tf.placeholder(tf.float32, shape=(1, ISOMAP_SIZE, ISOMAP_SIZE, 1))
placeholders.append([colour, conf])
merged = tf.squeeze(cnn_tf_graphs.merge_isomaps_softmax(placeholders))
merged_uint8 = tf.cast(merged, tf.uint8)
encoded = tf.image.encode_png(merged_uint8)
write_file_op = tf.write_file(outpath, encoded)
                    merge_length = len(isomap_list)
sess = tf.Session()
                print('merging', merge_length, 'images (max', len(isomap_lists[-1]), ') idx', j, 'of', len(isomap_lists))
feed_dict = {}
for i in range(len(isomap_list)):
feed_dict[placeholders[i][0]] = np.expand_dims(cv2.imread(isomap_list[i], cv2.IMREAD_UNCHANGED)[:,:,:3].astype(np.float32)[:,:,::-1], axis=0)
feed_dict[placeholders[i][1]] = np.expand_dims(np.load(confidence_lists[j][i]).astype(np.float32), axis=0)
feed_dict[outpath] = output_list[j]
sess.run(write_file_op, feed_dict=feed_dict)