def parse(self):
    fname = self.fname.text()
    if os.path.isfile(fname):
        f = open(fname, 'rb')  # binary file of float64 values
    else:
        sys.stderr.write('Unable to open %s\n' % fname)
        return
    self.vol = np.fromfile(f, dtype='f8')
    f.close()
    # Infer the edge length of the cubic volume from the element count
    self.size = int(np.ceil(np.power(len(self.vol), 1./3.)))
    self.vol = self.vol.reshape(self.size, self.size, self.size)
    self.center = self.size // 2
    if not self.image_exists:
        self.layer_slider.setRange(0, self.size - 1)
        self.layernum.setMaximum(self.size - 1)
        self.layer_slider.setValue(self.center)
        self.layerslider_moved(self.center)
    self.old_fname = fname
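A standalone sketch of the cube-root trick used above to recover the edge length of a cubic volume stored as a flat array:

import numpy as np

vol = np.zeros(64, dtype='f8')                    # flat array holding a 4x4x4 volume
size = int(np.ceil(np.power(len(vol), 1./3.)))    # -> 4
cube = vol.reshape(size, size, size)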
def _parse_headers(self):
    self.num_data_list = []
    self.ones_accum_list = []
    self.multi_accum_list = []
    self.num_pix = []
    for i, photons_file in enumerate(self.photons_list):
        with open(photons_file, 'rb') as f:
            num_data = np.fromfile(f, dtype='i4', count=1)[0]
            self.num_pix.append(np.fromfile(f, dtype='i4', count=1)[0])
            if self.num_pix[i] != len(self.geom_list[i].x):
                sys.stderr.write('Warning: num_pix for %s is different (%d vs %d)\n' % (photons_file, self.num_pix[i], len(self.geom_list[i].x)))
            f.seek(1024, 0)  # skip the fixed 1024-byte header
            ones = np.fromfile(f, dtype='i4', count=num_data)
            multi = np.fromfile(f, dtype='i4', count=num_data)
        self.num_data_list.append(num_data)
        self.ones_accum_list.append(np.cumsum(ones))
        self.multi_accum_list.append(np.cumsum(multi))
    self.num_data_list = np.cumsum(self.num_data_list)
    self.num_frames = self.num_data_list[-1]
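A typical use of the cumulative counts built above is mapping a global frame index back to its source file; a minimal sketch (locate_frame is a hypothetical helper, not part of the original class):

import numpy as np

def locate_frame(num_data_list, global_index):
    # num_data_list: cumulative frame counts per file, as built by _parse_headers
    file_index = int(np.searchsorted(num_data_list, global_index, side='right'))
    offset = num_data_list[file_index - 1] if file_index > 0 else 0
    return file_index, global_index - offset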
def read_data(self, start=None, end=None):
    """Read data from file and store it locally."""
    nframe = self._find_nframe_from_file()
    seek_to_data(self.file_object)
    read_start = 0
    end_read = nframe * self.nifs * self.nchans
    # Negative start/end indices count from the end, like Python slices
    if start is not None:
        if start < 0:
            read_start = (nframe + start) * self.nifs * self.nchans
        else:
            read_start = start * self.nifs * self.nchans
    if end is not None:
        if end < 0:
            end_read = (nframe + end) * self.nifs * self.nchans
        else:
            end_read = end * self.nifs * self.nchans
    # Note: the seek offset is in bytes, so this assumes one-byte samples
    self.file_object.seek(read_start, os.SEEK_CUR)
    nsamples_to_read = end_read - read_start
    data = np.fromfile(self.file_object, count=nsamples_to_read, dtype=self.dtype)
    nframe = data.size // self.nifs // self.nchans
    data = data.reshape((nframe, self.nifs, self.nchans))
    if self.nbits < 8:
        data = unpack(data, self.nbits)
    self.data = data
    return self.data
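A usage sketch (r is a hypothetical reader instance exposing this method); negative indices count from the end, like Python slices:

data = r.read_data(start=-100)          # last 100 frames, shape (100, nifs, nchans)
head = r.read_data(start=0, end=10)     # first 10 frames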
def readheader(self):
    self.fh.seek(0, 0)
    spam = self.fh.read(12)
    # HTK header: nSamples, sampPeriod (uint32); sampSize, parmKind (uint16); big-endian
    self.nSamples, self.sampPeriod, self.sampSize, self.parmKind = \
        unpack(">IIHH", spam)
    # Get coefficients for compressed data
    if self.parmKind & _C:
        self.dtype = 'h'  # compressed data is stored as int16
        self.veclen = self.sampSize // 2
        if self.parmKind & 0x3f == IREFC:
            self.A = 32767
            self.B = 0
        else:
            self.A = numpy.fromfile(self.fh, 'f', self.veclen)
            self.B = numpy.fromfile(self.fh, 'f', self.veclen)
            if self.swap:
                self.A = self.A.byteswap()
                self.B = self.B.byteswap()
    else:
        self.dtype = 'f'  # uncompressed data is stored as float32
        self.veclen = self.sampSize // 4
    self.hdrlen = self.fh.tell()
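A sketch of how A and B are typically applied afterwards, assuming the standard HTK convention that compressed values are stored as A*x - B:

import numpy

def decompress(raw, A, B):
    # invert c = A*x - B to recover x
    return (raw.astype(numpy.float32) + B) / A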
def read_from_gnt_dir(gnt_dir=train_data_dir):
    def one_file(f):
        header_size = 10
        while True:
            # 10-byte record header: sample size, tag code, width, height
            header = np.fromfile(f, dtype='uint8', count=header_size)
            if not header.size:
                break
            sample_size = header[0] + (header[1] << 8) + (header[2] << 16) + (header[3] << 24)
            tagcode = header[5] + (header[4] << 8)
            width = header[6] + (header[7] << 8)
            height = header[8] + (header[9] << 8)
            if header_size + width * height != sample_size:
                break
            image = np.fromfile(f, dtype='uint8', count=width * height).reshape((height, width))
            yield image, tagcode
    for file_name in os.listdir(gnt_dir):
        if file_name.endswith('.gnt'):
            file_path = os.path.join(gnt_dir, file_name)
            with open(file_path, 'rb') as f:
                for image, tagcode in one_file(f):
                    yield image, tagcode
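A usage sketch: the two-byte tag code in CASIA-HWDB GNT files is usually decoded as GB2312 (the directory path below is illustrative):

import struct

for image, tagcode in read_from_gnt_dir(gnt_dir='data/HWDB/train'):  # hypothetical path
    char = struct.pack('>H', tagcode).decode('gb2312')
    print(char, image.shape)
    break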
def get_embedding():
    embedding_path = os.path.join(FLAGS.datasets_dir, "wordVectors.txt")
    if not tf.gfile.Exists(embedding_path):
        raise ValueError("embedding file does not exist")
    # Parse whitespace-separated floats, then reshape to (vocab_size, 50)
    data = np.fromfile(embedding_path, dtype=np.float32, sep=' ')
    print("shape:", data.shape)
    print("ndim:", data.ndim)
    print("dtype:", data.dtype)
    d = data.reshape((-1, 50))
    print("reshaped:", d.shape)
    return d
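Usage is straightforward: row i of the returned matrix is the 50-dimensional vector for vocabulary entry i (the vocabulary lookup itself is assumed to live elsewhere):

embedding = get_embedding()
vec = embedding[42]      # vector for the 43rd vocabulary word
print(vec.shape)         # (50,)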
def plot_conf_mat(densmap_name):
    fig = plt.figure(figsize=(20, 20))
    plt.clf()
    ax = fig.add_subplot(111)
    # Load the 227x227 float32 density map, scale, and clamp to [0, 1]
    densmap = np.fromfile(densmap_name, np.float32)
    densmap = densmap.reshape(227, 227)
    densmap *= 100
    densmap[densmap > 1] = 1
    res = ax.imshow(densmap, cmap=plt.cm.jet, interpolation='nearest')
    plt.savefig('density.jpg')
    img = cv2.imread("density.jpg")
    img = cv2.resize(img, (227, 227))
    cv2.imshow("i", img)
    cv2.waitKey(0)
def load_mnist(self):
    data_dir = os.path.join("./data", "mnist")
    # Raw IDX files: skip the 16-byte (images) / 8-byte (labels) headers
    with open(os.path.join(data_dir, 'train-images-idx3-ubyte'), 'rb') as fd:
        loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float64)
    with open(os.path.join(data_dir, 'train-labels-idx1-ubyte'), 'rb') as fd:
        loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape((60000,)).astype(np.float64)
    with open(os.path.join(data_dir, 't10k-images-idx3-ubyte'), 'rb') as fd:
        loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float64)
    with open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'), 'rb') as fd:
        loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape((10000,)).astype(np.float64)
    X = np.concatenate((trX, teX), axis=0)
    y = np.concatenate((trY, teY), axis=0)
    # Shuffle images and labels with the same seed so they stay aligned
    seed = 547
    np.random.seed(seed)
    np.random.shuffle(X)
    np.random.seed(seed)
    np.random.shuffle(y)
    # Convert labels to one-hot
    y_vec = np.zeros((len(y), 10), dtype=np.float64)
    for i, label in enumerate(y):
        y_vec[i, int(label)] = 1.0
    return X / 255., y_vec
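A usage sketch, assuming model is an instance of the owning class: images come back scaled to [0, 1] and labels one-hot encoded:

X, y = model.load_mnist()
print(X.shape)   # (70000, 28, 28, 1)
print(y.shape)   # (70000, 10)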
def unpack(stream):
    base = stream.tell()
    header = Header.unpack(stream)
    influence_groups = [None]*header.influence_group_count
    inverse_bind_matrices = None
    stream.seek(base + header.influence_count_offset)
    for i in range(header.influence_group_count):
        influence_count = uint8.unpack(stream)
        influence_groups[i] = [Influence(None, None) for _ in range(influence_count)]
    stream.seek(base + header.index_offset)
    for influence_group in influence_groups:
        for influence in influence_group:
            influence.index = uint16.unpack(stream)
    stream.seek(base + header.weight_offset)
    for influence_group in influence_groups:
        for influence in influence_group:
            influence.weight = float32.unpack(stream)
    if header.inverse_bind_matrix_offset != 0:
        stream.seek(base + header.inverse_bind_matrix_offset)
        element_type = numpy.dtype((numpy.float32, (3, 4))).newbyteorder('>')
        element_count = (header.section_size - header.inverse_bind_matrix_offset)//element_type.itemsize
        inverse_bind_matrices = numpy.fromfile(stream, element_type, element_count)
    stream.seek(base + header.section_size)
    return influence_groups, inverse_bind_matrices
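The subarray dtype above is a compact way to read fixed-shape big-endian records; a standalone sketch of the same pattern:

import numpy

# each element is a big-endian 3x4 float32 matrix (48 bytes)
mat34_be = numpy.dtype((numpy.float32, (3, 4))).newbyteorder('>')
raw = numpy.arange(24, dtype='>f4').tobytes()   # two 3x4 matrices
mats = numpy.frombuffer(raw, dtype=mat34_be)
print(mats.shape)                               # (2, 3, 4)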
def unpack_array(stream, attribute_format, size):
    if attribute_format.attribute == gx.VA_POS:
        component_type = gx.ComponentType(attribute_format.component_type)
        component_count = gx.PositionComponentCount(attribute_format.component_count)
        array_type = Array
    elif attribute_format.attribute == gx.VA_NRM:
        component_type = gx.ComponentType(attribute_format.component_type)
        component_count = gx.NormalComponentCount(attribute_format.component_count)
        array_type = Array
    elif attribute_format.attribute in gx.VA_CLR:
        component_type = gx.ColorComponentType(attribute_format.component_type)
        component_count = gx.ColorComponentCount(attribute_format.component_count)
        array_type = ColorArray
    elif attribute_format.attribute in gx.VA_TEX:
        component_type = gx.ComponentType(attribute_format.component_type)
        component_count = gx.TexCoordComponentCount(attribute_format.component_count)
        array_type = Array
    else:
        raise FormatError('invalid vertex attribute')
    element_type = array_type.create_element_type(component_type, component_count)
    element_count = size//element_type.itemsize
    array = numpy.fromfile(stream, element_type, element_count).view(array_type)
    array.attribute = attribute_format.attribute
    array.component_type = component_type
    array.component_count = component_count
    array.scale_exponent = attribute_format.scale_exponent
    return array
def _read_bin(self, stream, byte_order):
    '''
    Read data from a binary stream. Raise StopIteration if the
    property could not be read.
    '''
    try:
        return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
    except IndexError:
        raise StopIteration

def _read_bin(self, stream, byte_order):
    (len_t, val_t) = self.list_dtype(byte_order)
    try:
        n = _np.fromfile(stream, len_t, 1)[0]
    except IndexError:
        raise StopIteration
    data = _np.fromfile(stream, val_t, n)
    if len(data) < n:
        raise StopIteration
    return data
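Both readers follow the binary PLY layout, where a list property is a count followed by that many values; a standalone sketch of the same pattern (the property layout here is illustrative):

import numpy as np

raw = np.uint8(3).tobytes() + np.array([0, 1, 2], dtype='<i4').tobytes()
n = np.frombuffer(raw, dtype='u1', count=1)[0]               # uint8 count
values = np.frombuffer(raw, dtype='<i4', count=n, offset=1)  # then the values
print(values)                                                # [0 1 2]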
def convert_f0(f0, src, trg):
    # Per-speaker log-F0 statistics (mean, std) stored as float32 pairs
    mu_s, std_s = np.fromfile(os.path.join('./etc', '{}.npf'.format(src)), np.float32)
    mu_t, std_t = np.fromfile(os.path.join('./etc', '{}.npf'.format(trg)), np.float32)
    # Voiced frames (f0 > 1): go to log scale, normalize with the source
    # statistics, rescale with the target statistics, then back to linear
    lf0 = tf.where(f0 > 1., tf.log(f0), f0)
    lf0 = tf.where(lf0 > 1., (lf0 - mu_s)/std_s * std_t + mu_t, lf0)
    lf0 = tf.where(lf0 > 1., tf.exp(lf0), lf0)
    return lf0
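The same mean-variance transform in plain NumPy, as a sketch with precomputed statistics (convert_f0_np is a hypothetical counterpart, not part of the original module):

import numpy as np

def convert_f0_np(f0, mu_s, std_s, mu_t, std_t):
    voiced = f0 > 1.
    lf0 = np.where(voiced, np.log(np.maximum(f0, 1.)), f0)            # to log scale
    lf0 = np.where(voiced, (lf0 - mu_s) / std_s * std_t + mu_t, lf0)  # renormalize
    return np.where(voiced, np.exp(lf0), lf0)                         # back to linear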
def test():
    # ==== Test: batch mixer (conclusion: capacity should be large to ensure good mixing) ====
    x, y = read('./dataset/vcc2016/bin/*/*/1*001.bin', 32, min_after_dequeue=1024, capacity=2048)
    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        for _ in range(200):
            x_, y_ = sess.run([x, y])
            print(y_)
    # ==== Read binary ====
    features = read_whole_features('./dataset/vcc2016/bin/Training Set/SF1/*001.bin')
    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        features = sess.run(features)
    y = pw2wav(features)
    sf.write('test1.wav', y, 16000)  # TODO: fs should be specified externally.
    # ==== Direct read ====
    f = './dataset/vcc2016/bin/Training Set/SF1/100001.bin'
    features = np.fromfile(f, np.float32)
    features = np.reshape(features, [-1, 513*2 + 1 + 1 + 1])  # 513*2 spectral dims, plus f0, en, spk
    y = pw2wav(features)
    sf.write('test2.wav', y, 16000)
def read_float64_as_float32(filename):
    x = np.fromfile(filename, np.float64)
    return x.astype(np.float32)
def main():
    '''NOTE: the input is rescaled to [-1, 1]'''
    dirs = validate_log_dirs(args)
    tf.gfile.MakeDirs(dirs['logdir'])
    with open(args.architecture) as f:
        arch = json.load(f)
    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)
    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )
    image, label = read(
        file_pattern=arch['training']['datadir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
    )
    machine = MODEL(arch)
    loss = machine.loss(image, label)
    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], machine=machine)
def _read_bin(self, stream, byte_order):
    '''
    Read data from a binary stream.
    '''
    return _np.fromfile(stream, self.dtype(byte_order), 1)[0]

def _read_bin(self, stream, byte_order):
    '''
    Read data from a binary stream.
    '''
    (len_t, val_t) = self.list_dtype(byte_order)
    n = _np.fromfile(stream, len_t, 1)[0]
    return _np.fromfile(stream, val_t, n)