def hosts_disable_all():
    """
    host status 0 = enabled
    host status 1 = disabled
    """
    logger.info('Disabling all hosts, in blocks of 1000')
    hosts = zapi.host.get(output=['hostid'], search={'status': 0})
    maxval = int(ceil(len(hosts) / 1000.0)) + 1
    bar = ProgressBar(maxval=maxval, widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
    for i in xrange(maxval):
        block = hosts[:1000]
        del hosts[:1000]
        if not block:
            break
        result = zapi.host.massupdate(hosts=block, status=1)
        bar.update(i + 1)
    bar.finish()
    logger.info('Done')
    return
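# The two Zabbix helpers (hosts_disable_all above and proxy_passive_to_active
# below) assume module-level `zapi` and `logger` objects plus the progressbar
# widgets. A minimal setup sketch under that assumption -- the URL and
# credentials are placeholders, and the original `logger` also exposes an
# `echo` attribute, so it is likely a project-specific wrapper rather than a
# plain logging.Logger:
from math import ceil
import logging

from pyzabbix import ZabbixAPI
from progressbar import ProgressBar, Percentage, ReverseBar, ETA, RotatingMarker, Timer

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

zapi = ZabbixAPI('https://zabbix.example.com')  # hypothetical URL
zapi.login('api_user', 'api_password')          # hypothetical credentials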
def proxy_passive_to_active():
    """
    proxy status 5 = active
    proxy status 6 = passive
    """
    logger.info('Changing all proxies to active')
    proxys = zapi.proxy.get(output=['proxyid', 'host'],
                            filter={'status': 6})
    if len(proxys) == 0:
        logger.info('Done')
        return
    bar = ProgressBar(maxval=len(proxys), widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
    i = 0
    for x in proxys:
        i += 1
        proxyid = x['proxyid']
        result = zapi.proxy.update(proxyid=proxyid, status=5)
        logger.echo = False
        logger.debug('Changed from passive to active proxy: %s' % (x['host']))
        bar.update(i)
    bar.finish()
    logger.echo = True
    logger.info('Done')
    return
def _parse(self, xml_file):
    self.xml_file = xml_file
    self.key_count = 0
    if not self.suppliers:
        self.suppliers = IceCatSupplierMapping(log=self.log, auth=self.auth, data_dir=self.data_dir)
    if not self.categories:
        self.categories = IceCatCategoryMapping(log=self.log, data_dir=self.data_dir, auth=self.auth)
    print("Parsing products from index file:", xml_file)
    with progressbar.ProgressBar(max_value=progressbar.UnknownLength) as self.bar:
        with open(self.xml_file, 'rb') as f:
            self.o = xmltodict.parse(f, attr_prefix='', postprocessor=self._postprocessor,
                                     namespace_separator='', process_namespaces=True, namespaces=self._namespaces)
    # peel down to the file key
    self.o = self.o['icecat-interface']['files.index']['file']
    self.log.info("Parsed {} products from IceCat catalog".format(str(len(self.o))))
    return len(self.o)
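# Standalone sketch of the UnknownLength pattern used in _parse() above
# (progressbar2 API): when the total is not known up front, pass
# progressbar.UnknownLength as max_value and update with a running counter.
# count_records() and its iterable argument are illustrations, not part of the
# original class.
import progressbar

def count_records(records):
    count = 0
    with progressbar.ProgressBar(max_value=progressbar.UnknownLength) as bar:
        for _ in records:
            count += 1
            bar.update(count)
    return count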
def load(self, table):
    cache = {}
    def save(row):
        (query, values) = self.get_insert(row, table)
        try:
            prepared = cache[query]
        except KeyError:
            prepared = self.session.prepare(query)
            cache[query] = prepared
        bound = prepared.bind(values)
        self.session.execute(bound)
    pool = Pool(100)
    i = 0
    print "Loading {}".format(table)
    with ProgressBar(max_value=len(self.dataframe)) as p:
        for _ in pool.imap_unordered(save, self.iter()):
            i += 1
            if i % 10 == 0:
                p.update(i)
def train(self, epochs, batch_size, learning_rate, save_to=None):
    self.train_step = pt.apply_optimizer(tf.train.AdamOptimizer(learning_rate, epsilon=1), losses=[self.error_function])
    init = tf.initialize_all_variables()
    self.sess.run(init)
    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=epochs).start()
    while self.get_epoch() < epochs:
        input_data = self.hdf5reader.next()
        _, loss_value = self.sess.run(
            [self.train_step, self.error_function],
            {
                self.encoder.input_data: input_data
            }
        )
        pbar.update(self.get_epoch())
    pbar.finish()
def images_to_hdf5(dir_path, output_hdf5, size=(112, 112), channels=3, resize_to=None):
    files = sorted(os.listdir(dir_path))
    nr_of_images = len(files)
    if resize_to:
        size = resize_to
    i = 0
    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=nr_of_images).start()
    data = np.empty(shape=(nr_of_images, size[0], size[1], channels), dtype=np.uint8)
    for f in files:
        datum = imread(dir_path + '/' + f)
        if resize_to:
            datum = np.asarray(Image.fromarray((datum), 'RGB').resize((size[0], size[1]), PIL.Image.ANTIALIAS))
        data[i, :, :, :] = datum
        i = i + 1
        pbar.update(i)
    pbar.finish()
    with h5py.File(output_hdf5, 'w') as hf:
        hf.create_dataset('data', data=data)
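# Usage sketch for images_to_hdf5 (directory and file names are hypothetical):
# convert a folder of RGB images into one uint8 dataset and read it back.
import h5py

images_to_hdf5('./frames', 'frames.h5', resize_to=(64, 64))
with h5py.File('frames.h5', 'r') as hf:
    data = hf['data'][:]   # shape (n_images, 64, 64, 3), dtype uint8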
def identify_many(scenes):
    """
    return metadata handlers of all valid scenes in a list, similar to function identify
    prints a progressbar
    :param scenes: a list of file names
    :return: a list of pyroSAR metadata handlers
    """
    idlist = []
    pbar = pb.ProgressBar(maxval=len(scenes)).start()
    for i, scene in enumerate(scenes):
        if isinstance(scene, ID):
            idlist.append(scene)
        else:
            try:
                id = identify(scene)
                idlist.append(id)
            except IOError:
                continue
        pbar.update(i + 1)
    pbar.finish()
    return idlist
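# Usage sketch for identify_many (the file names below are hypothetical);
# inputs that identify() cannot parse are silently skipped, as per the code above:
scenes = ['scene_001.zip', 'scene_002.zip', 'not_a_scene.txt']
ids = identify_many(scenes)
print('{} of {} inputs were recognized as SAR scenes'.format(len(ids), len(scenes)))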
def test(location):
    """Test with a single bar.
    Input: location - tuple (x, y) defining the position on the
                      screen of the progress bar
    """
    # fd is an object that has a .write() method
    writer = Writer(location)
    pbar = ProgressBar(fd=writer)
    # progressbar usage
    pbar.start()
    for i in range(100):
        # do stuff
        # time taken for the process is a function of the line number
        # t_wait = location[1] / 100
        # time taken is random
        t_wait = random.random() / 50
        time.sleep(t_wait)
        # update calls the write method
        pbar.update(i)
    pbar.finish()
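# The Writer class used by test() and test_bars() is not shown in this excerpt;
# per the comment in test() it only needs a .write() method. A minimal sketch
# under that assumption, positioning each bar with ANSI cursor-movement escapes --
# an illustration, not the original implementation:
import sys

class Writer(object):
    """Write progressbar output at a fixed (x, y) position on the terminal."""
    def __init__(self, location):
        self.location = location
    def write(self, string):
        x, y = self.location
        # \033[row;colH moves the cursor to a 1-based row and column
        sys.stdout.write('\033[%d;%dH%s' % (y + 1, x + 1, string))
    def flush(self):
        sys.stdout.flush()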
def test_bars(locations):
    """Test with multiple bars.
    Input: locations - a list of location (x, y) tuples
    """
    writers = [Writer(loc) for loc in locations]
    pbars = [ProgressBar(fd=writer) for writer in writers]
    for pbar in pbars:
        pbar.start()
    for i in range(100):
        time.sleep(0.01)
        for pbar in pbars:
            pbar.update(i)
    for pbar in pbars:
        pbar.finish()
def do():
    system_calls = {}
    p = progressbar.ProgressBar(maxval=500).start()
    for index, line in enumerate(open('unistd.h', 'r').readlines()):
        # print repr(line.strip())
        try:
            if '(' in line:
                p.update(index)
                num = re.search(r'\((.*)\)', line).group(1)
                num = int(eval(num))
                func_name = line.split('(')[0].strip()
                system_calls[num] = [func_name]
                system_calls[num].extend(get_system_call(func_name))
                # print system_calls[num][1]
        except Exception as e:
            print index, line, e.message
    p.finish()
    open('system_calls', 'w').write(zlib.compress(pickle.dumps(system_calls)))
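# Sketch for reading back the table written by do() above (same file name and
# serialization as in the code: a zlib-compressed pickle):
import pickle
import zlib

with open('system_calls', 'rb') as f:
    system_calls = pickle.loads(zlib.decompress(f.read()))
print(sorted(system_calls)[:5])   # a few syscall numbers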
def load_corpus(self, corenlpserver, process=True):
    """
    Use the PubMed web services to retrieve the title and abstract of each PMID
    :param corenlpserver:
    :param process:
    :return:
    """
    time_per_abs = []
    widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.AdaptiveETA(), ' ', pb.Timer()]
    pbar = pb.ProgressBar(widgets=widgets, maxval=len(self.pmids), redirect_stdout=True).start()
    for i, pmid in enumerate(self.pmids):
        t = time.time()
        newdoc = PubmedDocument(pmid)
        if newdoc.abstract == "":
            logging.info("ignored {} because no abstract was found".format(pmid))
            continue
        newdoc.process_document(corenlpserver, "biomedical")
        self.documents["PMID" + pmid] = newdoc
        abs_time = time.time() - t
        time_per_abs.append(abs_time)
        pbar.update(i + 1)
    pbar.finish()
    abs_avg = sum(time_per_abs) * 1.0 / len(time_per_abs)
    logging.info("average time per abstract: %ss" % abs_avg)
def run_random_search(search):
    # Create a progressbar to show how many searches have been done, removing eta
    search.progressbar = ProgressBar(1)
    search.progressbar.fmt = '''%(percent)3d%% %(bar)s %(current)s/%(total_items)s %(items_per_sec)s Run time: %(run_time)s'''
    # Get all public buckets that have been found so far
    search.buckets_found = get_buckets_found(search.output_file)
    # Create a string generator
    search.string_generator = createStringGenerator(search)
    my_queue = Queue.Queue()
    for i in range(search.threads):
        t = threading.Thread(target=search_instance, args=(search, ))
        my_queue.put(t)
    # Run all of the threads
    while not my_queue.empty():
        my_queue.get().start()
def run(path, quiet=False):
    """
    Downloads all available hash files to a given path.
    :param path: Path to download directory
    :param quiet: If set to True, no progressbar is displayed
    """
    if not os.path.isdir(path):
        print('Given path is not a directory.')
        sys.exit(1)
    session = requests.Session()
    session.headers = {'User-agent': 'Mozilla/5.0 Chrome/57.0.2987.110'}
    # Find the highest three-digit hash-file number linked from the index page
    index = session.get('https://virusshare.com/hashes.4n6').text
    max_num = max(int(m.strip('<>')) for m in re.findall(r'\>[1-9][0-9]{2}\<', index))
    if not quiet:
        p = progressbar.ProgressBar(max_value=max_num)
    for i in range(max_num):
        filename = str(i).zfill(3) + '.md5'
        if os.path.exists(os.path.join(path, filename)):
            continue
        if not quiet:
            p.update(i)
        url = URL + filename
        head = session.head(url)
        if head.status_code == 200:
            body = session.get(url, stream=True)
            with io.open(os.path.join(path, filename), mode='wb') as afile:
                for chunk in body.iter_content(chunk_size=1024):
                    afile.write(chunk)
            body.close()
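# `URL` above is a module-level constant not included in this excerpt; it is the
# base address that the three-digit .md5 file names are appended to. A placeholder
# sketch (the value below is an assumption, not taken from the original project):
URL = 'https://virusshare.com/hashes/'  # placeholder base URL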
def main():
    print('Looking for latest release')
    response = requests.get(RELEASES_URL)
    if response.ok:
        release = json.loads(response.text)
        print('Found latest release with version {0}'.format(release['tag_name']))
        if len(release['assets']) > 0:
            downloadableAssetIndex = -1
            for index, asset in enumerate(release['assets']):
                if asset['name'][0:5] == 'build' and downloadableAssetIndex == -1:
                    downloadableAssetIndex = index
            if downloadableAssetIndex == -1:
                print('Could not find downloadable release build, aborting')
            else:
                print('Found downloadable build with name {0}'.format(release['assets'][downloadableAssetIndex]['name']))
                print('Downloading latest client release with version {0}'.format(release['tag_name']))
                buildDownloadUrl = release['assets'][downloadableAssetIndex]['browser_download_url']
                buildFileName = release['assets'][downloadableAssetIndex]['name']
                r = requests.get(buildDownloadUrl, stream=True)
                with progressbar.ProgressBar(max_value=len(r.content)) as bar:
                    deleteContentsOfFolder(DOWNLOAD_FOLDER)
                    downloaded = 0
                    with open(join(DOWNLOAD_FOLDER, buildFileName), 'wb') as file:
                        for chunk in r.iter_content(chunk_size=1024):
                            downloaded += len(chunk)
                            bar.update(downloaded)
                            file.write(chunk)
                print('Download finished')
                deleteContentsOfFolder(DESTINATION_FOLDER)
                with zipfile.ZipFile(join(DOWNLOAD_FOLDER, buildFileName), 'r') as zip:
                    print('Extracting downloaded file into {0}'.format(DESTINATION_FOLDER))
                    zip.extractall(DESTINATION_FOLDER)
                print('Finished')
                return True
    else:
        print('Could not get info about latest release')
        return False
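# Assumed module-level constants for main() above (all values are placeholders;
# the repository path and folder names are not given in this excerpt). The GitHub
# "latest release" endpoint returns an object with 'tag_name' and 'assets', which
# matches how the response is used above.
RELEASES_URL = 'https://api.github.com/repos/OWNER/REPO/releases/latest'  # hypothetical repo
DOWNLOAD_FOLDER = 'download'        # hypothetical
DESTINATION_FOLDER = 'client'       # hypothetical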
def run(args):
    if args.download:
        resolvers = download_resolvers()
    else:
        resolvers = load_resolvers(args.resolvers)
    random.shuffle(resolvers)
    pool = gevent.pool.Pool(args.concurrency)
    bar = progressbar.ProgressBar(redirect_stdout=True, redirect_stderr=True)
    for resolver in bar(resolvers):
        pool.add(gevent.spawn(check_resolver, args, resolver))
    pool.join()
def __init__(self, options):
    self.wildcards = []
    self.options = options
    self.domains = []
    if options.domains:
        self.domains += filter(None, options.domains.read().split("\n"))
    self.domains += options.domain
    self.domains = list(set(self.domains))
    random.shuffle(self.domains)
    self.resolvers = map(str.strip, filter(None, options.resolvers.read().split("\n")))
    random.shuffle(self.resolvers)
    self.names = [X for X in self._load_names(options.names)]
    if options.progress:
        self.progress = progressbar.ProgressBar(
            redirect_stdout=True,
            redirect_stderr=True,
            widgets=[
                progressbar.Percentage(),
                progressbar.Bar(),
                ' (', progressbar.ETA(), ') ',
            ])
    else:
        self.progress = None
    self.finished = 0
    LOG.info("%d names, %d resolvers, %d domains",
             len(self.names), len(self.resolvers), len(self.domains))
def compute_embeddings(images):
    """Runs inference on a list of images.
    Args:
        images: Image file names.
    Returns:
        Dict mapping image file name to embedding.
    """
    # Creates graph from saved GraphDef.
    create_graph()
    filename_to_emb = {}
    config = tf.ConfigProto(device_count={'GPU': 0})
    bar = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    with tf.Session(config=config) as sess:
        i = 0
        for image in bar(images):
            if not tf.gfile.Exists(image):
                tf.logging.fatal('File does not exist %s', image)
            image_data = tf.gfile.FastGFile(image, 'rb').read()
            # Some useful tensors:
            # 'softmax:0': A tensor containing the normalized prediction across
            #   1000 labels.
            # 'pool_3:0': A tensor containing the next-to-last layer holding a 2048-
            #   float description of the image.
            # 'DecodeJpeg/contents:0': A tensor containing a string providing the JPEG
            #   encoding of the image.
            # Runs the embedding tensor by feeding image_data as input to the graph.
            softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
            embedding_tensor = sess.graph.get_tensor_by_name('pool_3:0')
            embedding = sess.run(embedding_tensor,
                                 {'DecodeJpeg/contents:0': image_data})
            filename_to_emb[image] = embedding.reshape(2048)
            i += 1
            # print(image, i, len(images))
    return filename_to_emb
# temp_dir is a subdir of temp
def main(project_id, video_basename, sampling_rate=3):
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # or any of {'0', '1', '2'}
    video_name = video_basename[:video_basename.index('.')]
    # extract video frames
    extracted_frame_dir = os.path.join('temp', project_id, video_name, 'frames')
    mkdir_p(extracted_frame_dir)
    if not os.path.isdir(extracted_frame_dir):
        os.mkdir(extracted_frame_dir)
    video_path = os.path.join('videos', project_id, video_basename)
    vidcap = cv2.VideoCapture(video_path)
    print('Extracting video frames...')
    bar = progressbar.ProgressBar(maxval=101, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    fps = vidcap.get(CV_CAP_PROP_FPS)  # TODO
    fps = fps if fps == fps else 25  # NaN never compares equal to itself
    print 'actual fps', fps, 'sampling rate', sampling_rate
    success, image = vidcap.read()
    frames_to_extract = range(0, int(vidcap.get(CV_CAP_PROP_FRAME_COUNT)), int(round(fps / sampling_rate)))
    frame_count = len(frames_to_extract)
    for frame_pos in bar(frames_to_extract):
        vidcap.set(CV_CAP_PROP_POS_FRAMES, frame_pos)
        success, image = vidcap.read()
        # print('Read a new frame: %f ms' % vidcap.get(CV_CAP_PROP_POS_MSEC), success)
        cv2.imwrite(os.path.join(extracted_frame_dir, "%09d.jpg" % vidcap.get(CV_CAP_PROP_POS_MSEC)), image)  # TODO (might still work)
    bar.finish()
def download(number, save_dir='./'):
    """Download a pre-trained word vector
    :param number: int, row number of the pre-trained vector to download
    :param save_dir: str, default './'
    :return: file path of the downloaded file
    """
    df = load_datasets()
    row = df.iloc[[number]]
    url = ''.join(row.URL)
    if not url:
        print('The word vector you specified was not found. Please specify correct name.')
        return None
    widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA(), ' ', FileTransferSpeed()]
    pbar = ProgressBar(widgets=widgets)
    def dlProgress(count, blockSize, totalSize):
        if pbar.max_value is None:
            pbar.max_value = totalSize
            pbar.start()
        pbar.update(min(count * blockSize, totalSize))
    file_name = url.split('/')[-1]
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, file_name)
    path, _ = urlretrieve(url, save_path, reporthook=dlProgress)
    pbar.finish()
    return path
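# Usage sketch for download(): pick a row from the table returned by
# load_datasets() and fetch it (row number 0 and the save directory below are
# just examples):
df = load_datasets()
print(df.head())                        # inspect the available vectors
path = download(0, save_dir='./vectors')
print('saved to', path)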
def __enter__(self):
    self.bar = progressbar.ProgressBar(
        widgets=[
            progressbar.Percentage(),
            ' ',
            progressbar.Bar(),
            progressbar.FileTransferSpeed(),
            ' ',
            progressbar.ETA(),
        ],
        max_value=self.max_value,
    )
    self.fd = open(self.output_path, 'wb')
    return self
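# The matching __exit__ is not shown in this excerpt; a minimal sketch under the
# assumption that it simply closes the output file and finishes the bar:
def __exit__(self, exc_type, exc_value, traceback):
    self.fd.close()
    self.bar.finish()
    return False  # do not suppress exceptions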