import math as M
import numpy as np


def _nearest_pow_2(x):
    """
    Find the power of two nearest to x.

    >>> _nearest_pow_2(3)
    2.0
    >>> _nearest_pow_2(15)
    16.0

    :type x: float
    :param x: Number
    :rtype: float
    :return: Nearest power of 2 to x
    """
    a = M.pow(2, M.ceil(np.log2(x)))
    b = M.pow(2, M.floor(np.log2(x)))
    if abs(a - x) < abs(b - x):
        return a
    else:
        return b
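A helper like this typically feeds FFT window sizing; a hypothetical use, with the function above in scope:

nfft = int(_nearest_pow_2(1000))
print(nfft)  # 1024 is the nearest power of two to 1000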
Python ceil() usage examples from open-source projects

Source: test_libevreactor.py, project deb-python-cassandra-driver (author: openstack)
def test_partial_send(self, *args):
c = self.make_connection()
# only write the first four bytes of the OptionsMessage
write_size = 4
c._socket.send.side_effect = None
c._socket.send.return_value = write_size
c.handle_write(None, 0)
msg_size = 9 # v3+ frame header
expected_writes = int(math.ceil(float(msg_size) / write_size))
size_mod = msg_size % write_size
last_write_size = size_mod if size_mod else write_size
self.assertFalse(c.is_defunct)
self.assertEqual(expected_writes, c._socket.send.call_count)
self.assertEqual(last_write_size, len(c._socket.send.call_args[0][0]))
Source: test_asyncorereactor.py, project deb-python-cassandra-driver (author: openstack)
def test_partial_send(self, *args):
c = self.make_connection()
# only write the first four bytes of the OptionsMessage
write_size = 4
c.socket.send.side_effect = None
c.socket.send.return_value = write_size
c.handle_write()
msg_size = 9 # v3+ frame header
expected_writes = int(math.ceil(float(msg_size) / write_size))
size_mod = msg_size % write_size
last_write_size = size_mod if size_mod else write_size
self.assertFalse(c.is_defunct)
self.assertEqual(expected_writes, c.socket.send.call_count)
self.assertEqual(last_write_size, len(c.socket.send.call_args[0][0]))
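Both reactor tests lean on the same ceiling-division idiom: a 9-byte message written in 4-byte chunks takes ceil(9 / 4) = 3 sends, the last carrying 9 % 4 = 1 byte. The arithmetic in isolation, standard library only:

import math

msg_size, write_size = 9, 4
expected_writes = int(math.ceil(float(msg_size) / write_size))
last_write_size = msg_size % write_size or write_size
print(expected_writes, last_write_size)  # 3 1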
from math import ceil, floor, log10


def compute_logarithmic_scale(min_, max_, min_scale, max_scale):
    """Compute an optimal list of tick positions for a logarithmic scale."""
    if max_ <= 0 or min_ <= 0:
        return []
    min_order = int(floor(log10(min_)))
    max_order = int(ceil(log10(max_)))
    positions = []
    amplitude = max_order - min_order
    if amplitude <= 1:
        return []
    detail = 10.
    # Tune the number of subdivisions per decade to the requested tick density.
    while amplitude * detail < min_scale * 5:
        detail *= 2
    while amplitude * detail > max_scale * 3:
        detail /= 2
    for order in range(min_order, max_order + 1):
        for i in range(int(detail)):
            tick = (10 * i / detail or 1) * 10 ** order
            # round_to_scale is a helper defined elsewhere in the same module.
            tick = round_to_scale(tick, tick)
            if min_ <= tick <= max_ and tick not in positions:
                positions.append(tick)
    return positions
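The floor/ceil pair on log10 brackets the data by whole decades; data spanning 3 to 4500, for instance, covers orders 0 through 4. A standard-library check:

from math import ceil, floor, log10

min_, max_ = 3, 4500
print(int(floor(log10(min_))), int(ceil(log10(max_))))  # 0 4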
def Occlusion_exp(image, occluding_size, occluding_stride, model, preprocess, classes, groundTruth):
    # Requires at module level: numpy as np, math, copy, torch, torch.nn as nn,
    # torch.autograd.Variable, PIL.Image, and a `use_gpu` flag.
    img = np.copy(image)
    height, width, _ = img.shape
    # Number of occluder positions along each axis (sliding-window count).
    output_height = int(math.ceil((height - occluding_size) / occluding_stride + 1))
    output_width = int(math.ceil((width - occluding_size) / occluding_stride + 1))
    occluded_images = []
    for h in range(output_height):
        for w in range(output_width):
            # Occluder region, clipped to the image borders.
            h_start = h * occluding_stride
            w_start = w * occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)
            input_image = copy.copy(img)
            input_image[h_start:h_end, w_start:w_end, :] = 0
            occluded_images.append(preprocess(Image.fromarray(input_image)))
    L = np.empty(output_height * output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack([im for im in occluded_images])
    dataset = torch.utils.data.TensorDataset(tensor_images, L)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=5, shuffle=False, num_workers=8)
    heatmap = np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data
        if use_gpu:
            # `async=True` in the original is a syntax error on Python >= 3.7;
            # `non_blocking=True` is the modern equivalent.
            images, labels = images.cuda(), labels.cuda(non_blocking=True)
        outputs = model(Variable(images))
        m = nn.Softmax(dim=1)
        outputs = m(outputs)
        # The original only defined `outs` on the GPU path, which raised a
        # NameError on CPU; keep both paths consistent.
        outs = outputs.cpu() if use_gpu else outputs
        heatmap = np.concatenate((heatmap, outs[:, groundTruth].data.numpy()))
    return heatmap.reshape((output_height, output_width))
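The grid dimensions follow the usual sliding-window count; for a hypothetical 224-pixel side, a 32-pixel occluder, and a stride of 16, that is ceil((224 - 32) / 16 + 1) = 13 positions per axis:

import math

height, occluding_size, occluding_stride = 224, 32, 16
print(int(math.ceil((height - occluding_size) / occluding_stride + 1)))  # 13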
def percentile(N, percent, key=lambda x:x):
"""
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
k = (len(N)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0+d1
# median is 50th percentile.
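In the classic version of this recipe the median is derived with functools.partial; a minimal sketch of that pairing, using percentile as defined above:

from functools import partial

median = partial(percentile, percent=0.5)
print(median(sorted([3, 1, 4, 1, 5])))  # 3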
import math


def gen_sieve(ceiling=None):
    # Incremental trial-division sieve: yields primes indefinitely, or up to
    # (roughly) `ceiling` when one is given.
    if ceiling is not None:
        if ceiling % 2 == 0:
            ceiling -= 1
        # Only primes up to sqrt(ceiling) are needed for trial division.
        highest_prime = math.ceil(math.sqrt(ceiling))
    last_val = 1
    found_primes = []
    yield 2
    while ceiling is None or ceiling > last_val:
        current_val = None
        while current_val is None:
            current_val = last_val = last_val + 2
            for prime, square in found_primes:
                if current_val < square:
                    # found_primes is ascending, so every remaining prime
                    # exceeds sqrt(current_val): the candidate is prime.
                    break
                if current_val % prime == 0:
                    current_val = None
                    break
        yield current_val
        if ceiling is None or highest_prime > last_val:
            found_primes.append((current_val, current_val ** 2))
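A usage sketch with only the standard library: drain the unbounded generator until the primes pass 30.

from itertools import takewhile

print(list(takewhile(lambda p: p < 30, gen_sieve())))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]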
def stop_foraging(self):
    if self.last_action and self.last_action[0] == "foraging":
        item = self.last_action[1]
        # Seconds rounded up to the whole second, then converted to minutes.
        time_spent = math.ceil(time.time() - self.last_action_time) / 60
        self.last_action = None
        self.last_action_time = None
        item_amount = math.floor(time_spent * self.foraging_rate)
        self.inventory[item] = self.inventory.get(item, 0) + item_amount
        if self.inventory[item] == 0:
            del self.inventory[item]
        self.foraging_xp += item_amount
        # forageables maps each item to its (secondary, tertiary) by-products.
        secondary_item = forageables[item][0]
        tertiary_item = forageables[item][1]
        secondary_amount = random.randint(0, item_amount)
        tertiary_amount = math.floor(random.randint(0, item_amount) / 100)
        self.inventory[secondary_item] = self.inventory.get(secondary_item, 0) + secondary_amount
        if self.inventory[secondary_item] == 0:
            del self.inventory[secondary_item]
        self.inventory[tertiary_item] = self.inventory.get(tertiary_item, 0) + tertiary_amount
        if self.inventory[tertiary_item] == 0:
            del self.inventory[tertiary_item]
        self.write_data()
        return item, time_spent, item_amount, secondary_amount, tertiary_amount
    else:
        return False, self.last_action
def stop_woodcutting(self):
    if self.last_action and self.last_action[0] == "woodcutting":
        wood_type = self.last_action[1]
        # Seconds rounded up to the whole second, then converted to minutes.
        time_spent = math.ceil(time.time() - self.last_action_time) / 60
        self.last_action = None
        self.last_action_time = None
        current_wood_lvl = wood_lvl(wood_type)
        wood_amount = math.floor(time_spent * self.wood_rate(wood_type) * self.woodcutting_rate)
        xp_amount = current_wood_lvl * wood_amount
        self.inventory[wood_type] = self.inventory.get(wood_type, 0) + wood_amount
        if self.inventory[wood_type] == 0:
            del self.inventory[wood_type]
        self.woodcutting_xp += xp_amount
        self.write_data()
        return wood_type, time_spent, wood_amount, xp_amount
    else:
        return False, self.last_action
def download_mutation_images(self):
# TODO: dunno if this works
import ipywidgets
import math
views = []
for g in self.reference_gempro.genes:
if g.protein.representative_structure:
view = g.protein.view_all_mutations(alignment_type='seqalign', grouped=False, structure_opacity=0.5,
opacity_range=(0.6, 1), scale_range=(.5, 5))
view._remote_call("setSize", target='Widget', args=['300px', '300px'])
view.download_image(filename='{}_{}_mutations.png'.format(g.id, g.name))
views.append(view)
    # Group the views three per row; ceil() covers a final partial row.
    hboxes = [ipywidgets.HBox(views[i * 3:i * 3 + 3])
              for i in range(int(math.ceil(len(views) / 3.0)))]
vbox = ipywidgets.VBox(hboxes)
return vbox
def __call__(self):
total_processes = _calculate_workers()
ctxt = {
"service_name": self.service_name,
"user": self.user,
"group": self.group,
"script": self.script,
"admin_script": self.admin_script,
"public_script": self.public_script,
"processes": int(math.ceil(self.process_weight * total_processes)),
"admin_processes": int(math.ceil(self.admin_process_weight *
total_processes)),
"public_processes": int(math.ceil(self.public_process_weight *
total_processes)),
"threads": 1,
"usr_bin": git_determine_usr_bin(),
"python_path": git_determine_python_path(),
}
return ctxt
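The ceil() calls round each weighted share up, so any non-zero weight yields at least one process. A quick arithmetic check, with 4 standing in for a hypothetical _calculate_workers() result and the weights chosen for illustration:

import math

total_processes = 4  # hypothetical _calculate_workers() result
for weight in (0.75, 0.25, 0.1):
    print(int(math.ceil(weight * total_processes)))  # 3, 1, 1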
def save_ims(filename, ims, dpi=100, scale=0.5):
    # Requires: math and matplotlib.pyplot as plt; `ims` is a numpy-style
    # array shaped (N, C, H, W).
    n, c, h, w = ims.shape
    # Near-square grid: rows * cols >= n.
    rows = int(math.ceil(math.sqrt(n)))
    cols = int(round(math.sqrt(n)))
    fig, axes = plt.subplots(rows, cols, figsize=(w * cols / dpi * scale, h * rows / dpi * scale), dpi=dpi)
    for i, ax in enumerate(axes.flat):
        if i < n:
            ax.imshow(ims[i].transpose((1, 2, 0)))
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.1, hspace=0.1)
    plt.savefig(filename, dpi=dpi, bbox_inches='tight', transparent=True)
    plt.clf()
    plt.close()
def __init__(self, archivelist):
    # Mock job object built from a list of PCP archive files.
    self.node_archives = archivelist
    self.jobdir = os.path.dirname(archivelist[0])
    self.job_id = "1"
    self.end_str = "end"
    self.walltime = 9751
    self.nodecount = len(archivelist)
    self.acct = {"end_time": 12312, "id": 1, "uid": "sdf", "user": "werqw"}
    self.nodes = ["node" + str(i) for i in range(len(archivelist))]
    self._data = {}
    archive_starts = []
    archive_ends = []
    for archive in archivelist:
        context = pmapi.pmContext(c_pmapi.PM_CONTEXT_ARCHIVE, archive)
        mdata = context.pmGetArchiveLabel()
        # Widen to whole seconds: floor the start and ceil the end so the
        # job window fully covers every archive.
        archive_starts.append(datetime.datetime.utcfromtimestamp(math.floor(mdata.start)))
        archive_ends.append(datetime.datetime.utcfromtimestamp(math.ceil(context.pmGetArchiveEnd())))
    self.start_datetime = min(archive_starts)
    self.end_datetime = max(archive_ends)
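The floor/ceil pair widens each archive's window to whole seconds; in isolation, a start of t = 100.7 and an end of t = 205.2 become the window [100, 206]. Standard library only:

import datetime
import math

start, end = 100.7, 205.2
print(datetime.datetime.utcfromtimestamp(math.floor(start)))  # 1970-01-01 00:01:40
print(datetime.datetime.utcfromtimestamp(math.ceil(end)))     # 1970-01-01 00:03:26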
def adjust_job_start_end(job):
""" Set the job node start and end times based on the presence of the special
job-X-begin and job-X-end archives. Do nothing if these archives are absent
"""
startarchive = "job-{0}-begin".format(job.job_id)
endarchive = "job-{0}-end".format(job.job_id)
for nodename, filepaths in job.rawarchives():
begin = None
end = None
for fname in filepaths:
filename = os.path.basename(fname)
if filename.startswith(startarchive):
context = pmapi.pmContext(c_pmapi.PM_CONTEXT_ARCHIVE, fname)
mdata = context.pmGetArchiveLabel()
begin = datetime.datetime.utcfromtimestamp(math.floor(mdata.start))
if filename.startswith(endarchive):
context = pmapi.pmContext(c_pmapi.PM_CONTEXT_ARCHIVE, fname)
end = datetime.datetime.utcfromtimestamp(math.ceil(context.pmGetArchiveEnd()))
job.setnodebeginend(nodename, begin, end)
Source: touch_compiler_timestamps.py, project mongo_module_ninja (author: RedBeard0531)
def run_if_needed(base_file, then_file, now_file):
# Python uses doubles for mtime so it can't precisely represent linux's
# nanosecond precision. Round up to next whole second to ensure we get a
# stable timestamp that is guaranteed to be >= the timestamp of the
# compiler. This also avoids issues if the compiler is on a file system
# with high-precision timestamps, but the build directory isn't.
base_stat = os.stat(base_file)
mtime = math.ceil(base_stat.st_mtime)
atime = math.ceil(base_stat.st_atime)
if (os.path.exists(then_file)
and os.path.exists(now_file)
and os.stat(then_file).st_mtime == mtime):
return # Don't need to do anything.
createIfNeeded(now_file)
os.utime(now_file, None) # None means now
createIfNeeded(then_file)
os.utime(then_file, (atime, mtime))
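The rounding trick in isolation: ceiling a float mtime gives a whole-second value that is always >= the original, so the later equality check against the copied timestamp is stable. A minimal standalone check, standard library only:

import math
import os
import tempfile

with tempfile.NamedTemporaryFile() as f:
    st = os.stat(f.name)
    mtime = math.ceil(st.st_mtime)
    # A whole-second ceiling can only move the timestamp forward.
    assert mtime >= st.st_mtime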
def col_profile(num_cols, height):
profile = np.zeros(num_cols)
peak_width = int(math.ceil(num_cols * 0.125))
# average number of pixels should be height
for i in range(0, peak_width):
profile[i] = height
# average number of pixels should be 10% of height
for i in range(peak_width, num_cols - peak_width):
profile[i] = height * .1
# average number of pixels should be height
for i in range(num_cols - peak_width, num_cols):
profile[i] = height
# normalize to between 0 and 1
profile *= 1.0 / profile.max()
return profile
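A quick shape check, assuming numpy and math are imported as the snippet requires: the outer 12.5% of columns normalize to 1.0 and the middle to 0.1.

p = col_profile(16, 100)
print(p[:2], p[8], p[-2:])  # [1. 1.] 0.1 [1. 1.]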