def loadfromfile(filename, charset=None):
    """Loads data set from filename."""
    try:
        f = open(filename, 'rb')
    except OSError as e:
        stderr.write("Couldn't open data set file, error: {0}\n".format(e))
        return None
    else:
        try:
            dataset = pickle.load(f)
        except Exception as e:
            stderr.write("Couldn't load data set, error: {0}\n".format(e))
            return None
        else:
            stderr.write("Loaded data set from {0}\n".format(filename))
            return dataset
        finally:
            f.close()
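# --- Usage sketch (added illustration, not from the original project).
# Assumes the snippet's module-level imports; 'dataset.pkl' is a hypothetical path.
import pickle
from sys import stderr

dataset = loadfromfile('dataset.pkl')
if dataset is None:
    stderr.write("Falling back to a fresh data set.\n")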
def ask_username(config):
    stdout.write("Username: ".encode('utf-8'))
    raw_username = raw_input()
    try:
        get_user(config.temboard['users'], raw_username)
    except HTTPError:
        pass
    except ConfigurationError:
        pass
    else:
        stdout.write("User already exists.\n")
        return ask_username(config)
    try:
        username = raw_username
        validate_parameters({'username': username},
                            [('username', T_USERNAME, False)])
    except HTTPError:
        stdout.write("Invalid username.\n")
        return ask_username(config)
    return username
def print_status(progress, file_size, start):
    """
    This function - when passed as `on_progress` to `Video.download` - prints
    out the current download progress.
    :param progress:
        The length of the currently downloaded bytes.
    :param file_size:
        The total size of the video.
    :param start:
        The time when the download started.
    """
    percent_done = int(progress) * 100. / file_size
    done = int(50 * progress / int(file_size))
    dt = (clock() - start)
    if dt > 0:
        stdout.write("\r  [%s%s][%3.2f%%] %s at %s/s " %
                     ('=' * done, ' ' * (50 - done), percent_done,
                      sizeof(file_size), sizeof(progress // dt)))
    stdout.flush()
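# --- Usage sketch (added illustration): drive print_status with synthetic
# progress values. `sizeof` and `clock` are module-level helpers the snippet
# assumes; the stand-ins below are hypothetical.
from sys import stdout
from time import sleep, time as clock  # time.clock() was removed in Python 3.8

def sizeof(num):
    """Hypothetical byte-humanizing helper the snippet relies on."""
    for unit in ('B', 'KB', 'MB', 'GB'):
        if num < 1024:
            return "%3.1f %s" % (num, unit)
        num /= 1024.0
    return "%3.1f TB" % num

start = clock()
file_size = 1000000
for progress in range(0, file_size + 1, 100000):
    sleep(0.05)
    print_status(progress, file_size, start)
stdout.write("\n")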
def extract_all_features(save_dir, data_dir=DATA_DIR, extension=".cell"):
    from naive_bayes import extract_nb_features
    from random_forest import extract_rf_features
    from svc1 import extract_svc1_features
    from svc2 import extract_svc2_features
    import subprocess
    create_dir_if_not_exists(save_dir + '/knn_cells/')
    subprocess.run([
        'go', 'run', dirname + '/kNN.go', '-folder', data_dir + '/',
        '-new_path', save_dir + '/knn_cells/', '-extension', extension]
    )
    # extract_features(extract_nb_features, save_dir + '/nb_cells', data_dir=data_dir, extension=extension, model_name="naive bayes")
    extract_features(extract_rf_features, save_dir + '/rf_cells', data_dir=data_dir, extension=extension, model_name="random forest")
    extract_features(extract_svc1_features, save_dir + '/svc1_cells', data_dir=data_dir, extension=extension, model_name="svc1")
    extract_features(extract_svc2_features, save_dir + '/svc2_cells', data_dir=data_dir, extension=extension, model_name="svc2")
    stdout.write("Finished extracting features\n")
def import_data(data_dir=DATA_DIR, in_memory=True, extension=".cell"):
    """
    Reads all of the files in the `data_dir` and returns all of the contents in a variable.
    @param data_dir is a string with the name of the data directory
    @param in_memory is a boolean value. If true, it pulls all the data into memory
    @return
        if in_memory == True:
            a tuple with the following format: ([[size, incoming]], [webpage_label])
            where incoming is 1 for incoming packets and -1 for outgoing ones
        else:
            a tuple with the following format: ([paths], [webpage_label])
    """
    stdout.write("Starting data import\n")
    if in_memory:
        return pull_data_in_memory(data_dir, extension)
    else:
        return get_files(data_dir, extension)
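# --- Added illustration (made-up values) of the in-memory return format
# described in the docstring: one [size, incoming] sequence per trace and
# one webpage label per trace.
traces = [[[512, 1], [512, -1], [1024, 1]],
          [[256, -1], [512, 1]]]
labels = [0, 1]
assert len(traces) == len(labels)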
def parallel_cone(pipe, cells, time, cone_input, cone_layer, Vis_dark, Vis_resting_potential):
    # Initialize cone_response as a copy of cone_input
    # (a plain assignment would only alias the input array)
    cone_response = cone_input.copy()
    for cell in cells:
        if multiprocessing.current_process().name == "root":
            progress = 100 * (cell - cells[0]) / len(cells)
            stdout.write("\r progress: %d %%" % progress)
            stdout.flush()
        # Time-driven simulation
        for t in np.arange(0, time):
            # Update dynamics of the model
            cone_layer[cell].feedInput(cone_input[cell, t])
            cone_layer[cell].update()
            # Record response
            cone_response[cell, t] = (cone_layer[cell].LF_taum.last_values[0] -
                                      cone_layer[cell].LF_tauh.last_values[0] -
                                      Vis_dark - Vis_resting_potential)
    pipe.send(cone_response[cells, :])
    pipe.close()
#! ================
#! Class runNetwork
#! ================
def update_web_event(self):
    web_event = os.path.join(_base_dir, "web", "events-%s.json" % self._username)
    if not os.path.exists(web_event):
        self.init_event_outfile()
    json_events = self.jsonify_events()
    #self.bot.logger.info('####### Writing %s' % json_events)
    try:
        with open(web_event, "w") as outfile:
            json.dump(json_events, outfile)
    except (IOError, ValueError) as e:
        self.bot.logger.info('[x] Error while opening events file for write: %s' % e, 'red')
    except:
        raise FileIOException("Unexpected error writing to {}".format(web_event))
def main(argv, environ):
    parser = ArgumentParser(
        prog='temboard-agent-adduser',
        description="Add a new temboard-agent user.",
        argument_default=UNDEFINED_ARGUMENT,
    )
    args = parser.parse_args(argv)
    # Load configuration from the configuration file.
    config = load_configuration(
        specs=list_options_specs(), args=args, environ=environ,
    )
    username = ask_username(config)
    password = ask_password()
    hash_ = hash_password(username, password).decode('utf-8')
    try:
        with open(config.temboard['users'], 'a') as fd:
            fd.write("%s:%s\n" % (username, hash_))
    except IOError as e:
        raise UserError(str(e))
    else:
        stdout.write("Done.\n")
def test(self, dataset, subset='test', name='Test'):
    global g_args
    train_writer = tf.summary.FileWriter(
        os.path.join(hparams.SUMMARY_DIR,
                     str(datetime.datetime.now().strftime("%m%d_%H%M%S")) + ' ' + hparams.SUMMARY_TITLE),
        g_sess.graph)
    cli_report = {}
    for data_pt in dataset.epoch(
            subset, hparams.BATCH_SIZE * hparams.MAX_N_SIGNAL):
        # note: this disables dropout during test
        to_feed = dict(
            zip(self.train_feed_keys, (
                np.reshape(data_pt[0], [hparams.BATCH_SIZE, hparams.MAX_N_SIGNAL, -1, hparams.FEATURE_SIZE]),
                1.)))
        step_summary, step_fetch = g_sess.run(
            self.valid_fetches, to_feed)[:2]
        train_writer.add_summary(step_summary)
        stdout.write('.')
        stdout.flush()
        _dict_add(cli_report, step_fetch)
    stdout.write(name + ': %s\n' % (
        _dict_format(cli_report)))
def before_exit():
    lines_of_code = process_history()
    if not PySession.save or len(lines_of_code) == 0:
        stdout.write(DO_NOTHING)
        return
    filename = expanduser(os.getenv('PYSESSION_FILENAME', 'session.py'))
    if PySession.save_locally:
        stdout.write(SAVING_FILE.format(filename=filename))
        PySession.save_to_file('\n'.join(lines_of_code), filename)
        stdout.write(SUCCESS)
        return
    try:
        stdout.write(SAVING_GIST.format(filename=filename))
        gist_response = PySession.save_to_gist('\n'.join(lines_of_code), filename)
        gist_url = gist_response['html_url']
        PySession.save_gist_url(gist_url)
        webbrowser.open_new_tab(gist_url)
        stdout.write(SUCCESS)
    except Exception:  # bare except would also swallow KeyboardInterrupt
        stdout.write(FAILED)
        PySession.save_to_file('\n'.join(lines_of_code), filename)
def notify(self, msg):
    if isinstance(msg, basestring):
        print msg
    elif msg[0] == "PROGRESSBAR":
        n = len(self.progressbars)
        name, action = msg[1:]
        if action == "start":
            stdout.write(term.CLRSCR())
            self.progressbars[name] = term.ProgressBar()
        elif action == "end":
            stdout.write(term.CLRSCR())
            del self.progressbars[name]
        else:
            self.progressbars[name].frac = action
        stdout.write(term.CUP(0, 0))
        for name, progressbar in self.progressbars.iteritems():
            stdout.write("%s : %s\n" % (progressbar, name))
def loadfromfile(cls, infile):
    """Load model parameters from file and rebuild model."""
    with np.load(infile) as f:
        # Extract hyperparams and position
        p = f['p']
        hparams = pickle.loads(p.tobytes())
        hyper, epoch, pos = hparams['hyper'], hparams['epoch'], hparams['pos']
        # Load matrices
        pvalues = {n: f[n] for n in cls.pnames}
    # Create instance
    if isinstance(infile, str):
        stdout.write("Loaded model parameters from {0}\n".format(infile))
    stdout.write("Rebuilding model...\n")
    model = cls(hyper, epoch, pos, pvalues)
    return model
def savetofile(self, outfile):
    """Save model parameters to file."""
    # Pickle non-matrix params into bytestring, then convert to numpy byte array
    pklbytes = pickle.dumps({'hyper': self.hyper, 'epoch': self.epoch, 'pos': self.pos},
                            protocol=pickle.HIGHEST_PROTOCOL)
    p = np.frombuffer(pklbytes, dtype=np.uint8)  # np.fromstring is deprecated for binary data
    # Gather parameter matrices and names
    pvalues = {n: m.get_value() for n, m in self.params.items()}
    # Now save params and matrices to file
    try:
        np.savez_compressed(outfile, p=p, **pvalues)
    except OSError as e:
        raise e
    else:
        if isinstance(outfile, str):
            stdout.write("Saved model parameters to {0}\n".format(outfile))
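# --- Hypothetical round trip for the save/load pair above; the class name
# `Model` and the file name are placeholders, not from the original project.
model.savetofile('model.npz')               # pickled hyperparams + weight matrices
restored = Model.loadfromfile('model.npz')  # rebuilds an equivalent instance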
def build_onehots(self, vocab_size=None):
    """Build one-hot encodings of each sequence."""
    # If we're passed a charset size, great - if not, fall back to inferring vocab size
    if vocab_size:
        self.charsize = vocab_size
        vocab = vocab_size
    else:
        vocab = self.charsize
    stderr.write("Constructing one-hot vector data...")
    stderr.flush()
    time1 = time.time()
    # These can be large, so we don't necessarily want them on the GPU
    # Thus they're not Theano shared vars
    # Also, numpy fancy indexing is fun!
    self.x_onehots = np.eye(vocab, dtype=th.config.floatX)[self.x_array]
    self.y_onehots = np.eye(vocab, dtype=th.config.floatX)[self.y_array]
    time2 = time.time()
    stderr.write("done!\nTook {0:.4f} ms.\n".format((time2 - time1) * 1000.0))
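# --- Added illustration of the np.eye fancy-indexing trick used above:
# row i of the identity matrix is the one-hot vector for index i, so indexing
# with an integer array one-hot-encodes the whole array in one step.
import numpy as np

indices = np.array([0, 2, 1])
onehots = np.eye(3)[indices]
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.]])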
def buildmodelparams(self, hyper, checkpointdir=None):
    """Builds model parameters from given hyperparameters and charset size.
    Optionally saves checkpoint immediately after building if path specified.
    """
    useclass = self.modeltypes[self.modeltype]
    self.model = useclass(hyper)
    if checkpointdir:
        # Compile training functions
        self.model._build_t()
        # Get initial loss estimate
        stderr.write("Calculating initial loss estimate...\n")
        # We don't need anything fancy or long, just a rough baseline
        data_len = self.valid.batchepoch(16)
        loss_len = 20 if data_len >= 20 else data_len
        loss = self.model.calc_loss(self.valid, 0, batchsize=8, num_examples=loss_len)
        stderr.write("Initial loss: {0:.3f}\n".format(loss))
        stderr.write("Initial log loss: {0:.3f}\n".format(log(loss)))
        # Take checkpoint
        self.newcheckpoint(loss, savedir=checkpointdir)
def _vis_graph(graph, points, worker, status):
    total_points = len(points)
    visible_edges = []
    if status:
        t0 = default_timer()
        points_done = 0
    for p1 in points:
        for p2 in visible_vertices(p1, graph, scan='half'):
            visible_edges.append(Edge(p1, p2))
        if status:
            points_done += 1
            avg_time = round((default_timer() - t0) / points_done, 3)
            time_stat = (points_done, total_points - points_done, avg_time)
            status = '\r\033[' + str(21 * worker) + 'C[{:4}][{:4}][{:5.3f}] \r'
            stdout.write(status.format(*time_stat))
            stdout.flush()
    return visible_edges
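# --- Added illustration: the '\033[<n>C' escape (ANSI cursor forward) used
# above gives each worker its own status column on the same terminal line.
from sys import stdout

for worker in range(3):
    # return to column 0, then jump right 21*worker columns
    stdout.write('\r\033[' + str(21 * worker) + 'C' + '[worker %d]' % worker)
stdout.flush()
stdout.write('\n')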
def download_file(self, url: str, filepath: str, fname="", progf=False):
    """Download a file from `url` to `filepath/fname`."""
    r = self.session.get(url, stream=True)
    dlen = r.headers.get("content-length")
    step = (100 / int(dlen)) if dlen else 0  # servers may omit content-length
    prog = 0
    if not fname:
        fname = unquote(Path(r.url).name)
    with open(filepath + "/" + fname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                prog += len(chunk)
                if progf:
                    progf(int(step * prog))
                f.write(chunk)
    if progf:
        progf(0)
    return filepath + "/" + fname
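# --- Hedged usage sketch: assumes the enclosing class keeps a
# requests.Session in `self.session`; `client`, the URL, and the progress
# printer below are hypothetical.
from sys import stdout

def show_percent(pct):
    stdout.write("\r%3d%%" % pct)
    stdout.flush()

path = client.download_file("https://example.com/archive.zip", "/tmp",
                            progf=show_percent)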
def bruteforce(host, port, uname, wordlist):
    try:
        lista = open(wordlist, "r")
    except IOError:
        stdout.write(colored(" [x] Error opening word list\n", "red", attrs=['bold']))
        exit()
    url = "http://" + host + ":" + port + "/"
    init = time()
    for l in lista:
        pwd = l.strip()
        try:
            r = get(url, auth=(uname, pwd), timeout=3)
        except:
            stdout.write(colored("\n [-] There was an error connecting to the router %s\n" % (host), "red", attrs=['bold']))
            exit()
        if r.status_code == 200:
            stdout.write(colored("\n\n [+] Cracked => %s:%s\n [+] Duration => %s seconds\n\n" % (uname, pwd, time() - init), "green", attrs=['bold']))
            lista.close()
            exit()
        else:
            stdout.write(colored("\r [-] Current login %s:%s" % (uname, pwd), "yellow", attrs=['bold']))
            stdout.flush()
    print ""
    lista.close()
def file_stats(max_lenght):
    Fl = 7 ** max_lenght
    Fb = (max_lenght + 2) * Fl
    if Fb >= 1000 and Fb < 1000000:
        Fbfinal = Fb / 1000
        stdout.write("[+] Lines : %s\n[+] File size : %i Kb\n\n" % (Fl, Fbfinal))
    elif Fb >= 1000000 and Fb < 1000000000:
        Fbfinal = Fb / 1000000
        stdout.write("[+] Lines : %s\n[+] File size : %i Mb\n\n" % (Fl, Fbfinal))
    elif Fb >= 1000000000:
        setlocale(LC_NUMERIC, '')
        Fbfinal = Fb / 1000000000
        formato = format("%.*f", (0, Fbfinal), True)  # locale.format
        stdout.write(("[+] Lines : %s\n[+] File size : " % Fl) + formato + " Gb\n\n")
    else:
        stdout.write("[+] Lines : %s\n[+] File size : %i bytes\n\n" % (Fl, Fb))
    cmd = raw_input("[?] Do you want to continue [Y/N] : ")
    if cmd == "N" or cmd == "n":
        exit()
def generator(min_lenght, max_lenght, chars, name):
    lines = 0
    try:
        file = open(name, "w")
    except IOError:
        print "\n[x] Error : %s this path does not exist\n" % (name)
        exit()
    file_stats(max_lenght)
    print ""
    for n in range(min_lenght, max_lenght + 1):
        for xs in product(chars, repeat=n):
            lines = lines + 1
            string = ''.join(xs)
            file.write(string + "\n")
            stdout.write('\r[+] Saving character `%s`' % string)
            stdout.flush()
    print "\a"
    file.close()
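# --- Added illustration of the nested itertools.product loop at the core of
# the generator above (Python 3 syntax; values are made up):
from itertools import product

chars = "abc"
# every string of length 1..2 over a 3-character alphabet: 3 + 9 = 12 candidates
candidates = [''.join(xs) for n in range(1, 3) for xs in product(chars, repeat=n)]
print(len(candidates))  # 12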
def check_deploy_result(operation, console, appname, auth_header):
    i = 0
    while True:
        s = (i % 3 + 1) * '.'
        if len(s) < 3:
            s = s + (3 - len(s)) * ' '
        i += 1
        stdout.write("\r%s... %s " % (operation, s))
        stdout.flush()
        sleep(0.5)
        result = app_status(console, appname, auth_header)
        if result:
            stdout.write("\r%s... %s. " % (operation, result))
            stdout.flush()
            stdout.write("\n")
            return result
# From download_save_and_freeze_mobilenet.py (project: imagenet_models_flask, author: alesolano)
def download_and_uncompress_tarball(base_url, filename, data_dir):
    def _progress(count, block_size, total_size):
        stdout.write('\r>> Downloading %s %.1f%%' % (
            filename, float(count * block_size) / float(total_size) * 100.0))
        stdout.flush()
    tarball_url = base_url + filename
    filepath = osp.join(data_dir, filename)
    # The original snippet referenced undefined names (download_dir, model_dl);
    # checking the target path itself is the evident intent.
    if not tf.gfile.Exists(filepath):
        filepath, _ = urllib.request.urlretrieve(tarball_url, filepath, _progress)
        print()
        statinfo = stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    else:
        print('{} tarball already exists -- not downloading'.format(filename))
    tarfile.open(filepath, 'r:*').extractall(data_dir)
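# --- Hypothetical invocation; the URL/filename follow the public TF model-zoo
# layout, but verify them against the release you actually need.
download_and_uncompress_tarball(
    'http://download.tensorflow.org/models/',
    'mobilenet_v1_1.0_224_2017_06_14.tar.gz',
    '/tmp/checkpoints')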
def print_rule(self):
    """Print out rules."""
    stdout.write('======================================================\n')
    stdout.write('Rules:\n')
    for rule in self.rule:
        head = rule[0][0]
        tail = rule[0][1]
        confidence = rule[1]
        stdout.write('(')
        stdout.write(', '.join(head))
        stdout.write(')')
        stdout.write(' ==> ')
        stdout.write('(')
        stdout.write(', '.join(tail))
        stdout.write(')')
        stdout.write(' confidence = {0}\n'.format(round(confidence, 3)))
    stdout.write('======================================================\n')
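# --- Added illustration: the printer above expects `self.rule` to be a list
# of ((head, tail), confidence) entries; values below are made up.
rules = [((['bread', 'butter'], ['milk']), 0.812),
         ((['beer'], ['chips']), 0.604)]
# prints lines like: (bread, butter) ==> (milk) confidence = 0.812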
def optimize(self, X, lmbd, Z=None, max_iter=1, tol=1e-5):
    if Z is None:
        batch_size = X.shape[0]
        K = self.D.shape[0]
        z_curr = np.zeros((batch_size, K))
    else:
        z_curr = np.copy(Z)
    self.train_cost, self.train_z = [], []
    feed = {self.X: X, self.Z: z_curr, self.lmbd: lmbd}
    for k in range(max_iter):
        z_curr[:], dz, cost = self.session.run(
            [self.step_optim, self.dz, self._cost], feed_dict=feed)
        self.train_cost += [cost]
        self.train_z += [np.copy(z_curr)]
        if dz < tol:
            print("\r{} reached optimal solution in {}-iteration"
                  .format(self.name, k))
            break
        out.write("\rIterative optimization ({}): {:7.1%} - {:.4e}"
                  "".format(self.name, k / max_iter, dz))
        out.flush()
    self.train_cost += [self.session.run(self._cost, feed_dict=feed)]
    print("\rIterative optimization ({}): {:7}".format(self.name, "done"))
    return z_curr
def print_progress(self, threshold=0, decimals=1, barLength=100):
    """Print a terminal progress bar."""
    # Based on @Greenstick's reply (https://stackoverflow.com/a/34325723)
    iteration = self.stream.tell()
    if iteration > self.file_size:
        return
    total = self.file_size
    if total == 0:
        return
    progress = 100.0 * iteration / total
    if self.progress and progress - self.progress < threshold:
        return
    self.progress = progress
    percents = ("%03." + str(decimals) + "f") % progress
    filledLength = int(round(barLength * iteration / float(total)))
    barText = '*' * filledLength + '-' * (barLength - filledLength)
    stdout.write('%s| %s%% Completed\r' % (barText, percents))
    stdout.flush()
def __init__(self, parallel):
    self.continuation_prompt = self.prompt
    self.parallel = parallel
    width, height = get_terminal_size() or MIN_TERM_SIZE
    if any(map((lambda s: s[0] < s[1]), zip((height, width), MIN_TERM_SIZE))):
        stdout.write("\x1b[8;{rows};{cols}t".format(rows=max(MIN_TERM_SIZE[0], height),
                                                    cols=max(MIN_TERM_SIZE[1], width)))
    if self.parallel:
        processes = cpu_count()
        self.__last_tasklist = None
        self.tasklist = {}
        self.pool = Pool(processes, lambda: signal(SIGINT, SIG_IGN))
        atexit.register(self.graceful_exit)
    self.reexec = ['status']
    self.__bind_commands()
    super(FrameworkConsole, self).__init__()
    self.do_loglevel('info')
    self.__start_docserver()
    self.do_clear('')
def wait_time(self, data={'waitSeconds': None}):
    def complete(i, wait):
        return ((100 * (float(i) / float(wait))) * 50) / 100
    if data['waitSeconds'] is not None:
        wait = data['waitSeconds'] + (random.randint(2, 4) / 3.33)
        print(I18n.get('Waiting %s seconds') % str(wait))
        c = i = 0
        while c < 50:
            c = complete(i, wait)
            time.sleep(wait - i if i == int(wait) else 1)
            out.write("[{}]\0\r".format('+' * int(c) + '-' * (50 - int(c))))
            out.flush()
            i += 1
        out.write("\n")
        out.flush()
        return data['waitSeconds']
    return 99999999
def __iter__(self):
    try:
        with open(self.__filename, 'rb') as file_in:
            while True:
                data = file_in.read(self.__chunksize)
                if not data:
                    if self.__progressbar:
                        stdout.write("\n")
                    break
                self.__readsofar += len(data)
                if self.__progressbar:
                    percent = self.__readsofar * 100 / self.__totalsize
                    stdout.write("\r{percent:3.0f}%".format(percent=percent))
                yield data
    except OSError as error:
        raise exception.SourceNotFound(error)
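# --- Added sketch: the same chunked-read-with-progress pattern as a
# standalone generator (the path argument is a placeholder):
import os
from sys import stdout

def iter_chunks(path, chunksize=64 * 1024):
    """Yield a file in chunks, printing percent progress to stdout."""
    total = max(os.path.getsize(path), 1)  # guard against empty files
    read = 0
    with open(path, 'rb') as fh:
        while True:
            data = fh.read(chunksize)
            if not data:
                stdout.write("\n")
                return
            read += len(data)
            stdout.write("\r{:3.0f}%".format(read * 100 / total))
            yield data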
def create_admin(username):
    from eventit.eventit import db
    from eventit.models import User, Role
    import getpass
    from sys import stdout
    password = None
    password2 = None
    while not password or password != password2:
        password = getpass.getpass()
        password2 = getpass.getpass('Please, repeat your password: ')
        if not password or password != password2:
            stdout.write('Passwords do not match\n')
    user = User(username=username, email='', is_active=True, password=password)
    role_admin = Role.get_role_obj('admin')
    user.role = role_admin
    db.session.add(user)
    db.session.commit()
def LOG(message=None, type=None):
    if VERBOSITY <= 0:
        return
    elif VERBOSITY == 1:
        # minimal verbosity ... dot style output
        if type in MSGSCHEME_MIN:
            message = MSGSCHEME_MIN[type]
        if DO_COLOR and type in COLORSCHEME:
            message = COLORSCHEME[type] % message
        stdout.write("%s" % message)
        stdout.flush()
    else:
        if type in MSGSCHEME:
            message = MSGSCHEME[type] % message
        if DO_COLOR and type in COLORSCHEME:
            message = COLORSCHEME[type] % message
        if MODE_FUZZ:
            stdout.write("[FUZZ] %s\n" % (message))
        else:
            stdout.write("%s\n" % (message))
        stdout.flush()