def assign_node_constraints(snic, axes, face_constraints):
"""assign node constraints to prescribed node planes
    Nodes shared by multiple faces are assigned with the following order
    of precedence: z, y, x.
    :param snic: sorted node IDs and coordinates from nodes.dyn
    :param axes: mesh axes [x, y, z]
    :param face_constraints: list of (min, max) DOF string pairs ordered by
        axis: ((xmin, xmax), (ymin, ymax), (zmin, zmax))
        (e.g., (('1,1,1,1,1,1', '0,1,0,0,1,0'), ...))
    :return: bcdict - dictionary of node BCs to be written to bc.dyn
"""
from fem_mesh import extractPlane
from numpy import ndenumerate
bcdict = {}
for axis in range(0, 3):
for axlim in range(0, 2):
if axlim == 0:
axis_limit = axes[axis].min()
else:
axis_limit = axes[axis].max()
planeNodeIDs = extractPlane(snic, axes, (axis, axis_limit))
for i, id in ndenumerate(planeNodeIDs):
bcdict[id] = face_constraints[axis][axlim]
return bcdict
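# For illustration, a minimal sketch of the face_constraints structure this
# function expects -- the DOF strings below are made-up values, not from the
# original source:
face_constraints = (('1,1,1,1,1,1', '0,1,0,0,1,0'),  # (xmin, xmax)
                    ('0,1,0,0,1,0', '0,1,0,0,1,0'),  # (ymin, ymax)
                    ('1,1,1,1,1,1', '0,0,0,0,0,0'))  # (zmin, zmax)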
def constrain_sym_pml_nodes(bcdict, snic, axes, pml_elems, edge_constraints):
    """make sure that all "side" nodes for the PML elements are fully
    constrained, instead of being assigned the symmetry constraints
    THIS FUNCTION IS NOT NEEDED!!
    :param bcdict: dictionary of node BCs (from assign_node_constraints)
    :param snic: sorted node IDs and coordinates from nodes.dyn
    :param axes: mesh axes [x, y, z]
    :param pml_elems: number of PML elements on the (min, max) faces per axis
    :param edge_constraints: symmetry edge constraints, indexed by axis and
        (min, max) face
    :return: bcdict
    """
    from fem_mesh import extractPlane
    from numpy import ndenumerate
    # look for x/y symmetry faces
    axis_limit = None
    for axis in range(0, 2):
        if edge_constraints[0][axis][0]:
            axis_limit = axes[axis].min()
        elif edge_constraints[0][axis][1]:
            axis_limit = axes[axis].max()
        if axis_limit is not None:
            planeNodeIDs = extractPlane(snic, axes, (axis, axis_limit))
            pml_node_ids_zmin = planeNodeIDs[:, 0:(pml_elems[2][0] + 1)]
            pml_node_ids_zmax = planeNodeIDs[:, -(pml_elems[2][1] + 1):]
            for i, id in ndenumerate(pml_node_ids_zmin):
                bcdict[id] = '1,1,1,1,1,1'
            for i, id in ndenumerate(pml_node_ids_zmax):
                bcdict[id] = '1,1,1,1,1,1'
            axis_limit = None
    return bcdict
def create_zdisp(nodeidlist, disp_slice_z_only, zdisp):
"""create zdisp array from squeezed disp_slice at appropriate index
:param nodeidlist: first column of disp_slice with node IDs in row order
    :param disp_slice_z_only: squeezed disp_slice of just zdisp
:returns: zdisp -- array of z-disp in rows corresponding to node ID
(for fast read access)
"""
import numpy as np
for i, nodeid in np.ndenumerate(nodeidlist):
zdisp[nodeid] = disp_slice_z_only[i]
return zdisp
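# A self-contained usage sketch with made-up node IDs and displacements
# (names and values are illustrative, not from the original source):
import numpy as np

nodeidlist = np.array([3, 1, 2])                 # node IDs in row order
disp_slice_z_only = np.array([0.5, -0.1, 0.2])   # z-disp in the same row order
zdisp = np.zeros(4)                              # row 0 unused if IDs start at 1
zdisp = create_zdisp(nodeidlist, disp_slice_z_only, zdisp)
# zdisp[1] == -0.1, zdisp[2] == 0.2, zdisp[3] == 0.5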
def test_ndenumerate_crash(self):
# Ticket 1140
# Shouldn't crash:
list(np.ndenumerate(np.array([[]])))
def expand(self, move_probabilities):
self.children = {move: MCTSNode(self, move, prob)
for move, prob in np.ndenumerate(move_probabilities)}
# Pass should always be an option! Say, for example, seki.
self.children[None] = MCTSNode(self, None, 0)
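# ndenumerate is what makes this expansion work: iterating a 2-D probability
# array yields (row, col) tuples that double as move coordinates. A rough
# sketch (the 9x9 board size is an assumption):
import numpy as np

move_probabilities = np.full((9, 9), 1.0 / 81)
children = {move: prob for move, prob in np.ndenumerate(move_probabilities)}
# children[(2, 3)] is the prior probability of playing at row 2, column 3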
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
Shift labels so that classes are 0-39 and void is 255 (to ignore it).
The leading singleton dimension is required by the loss.
"""
label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['groundTruth'][0,0][0,0]['SegmentationClass'].astype(np.uint16)
for (x,y), value in np.ndenumerate(label):
label[x,y] = self.class_map[0][value-1]
label = label.astype(np.uint8)
        label -= 1  # shift labels down by one; void (0) wraps to 255
        label = label[np.newaxis, ...]
return label
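# The per-pixel ndenumerate loop above can typically be replaced by a
# vectorized table lookup; a sketch assuming class_map[0] behaves as a dense
# 1-indexed lookup array (the values here are hypothetical):
import numpy as np

class_map = np.array([5, 0, 3, 1], dtype=np.uint16)  # hypothetical table
label = np.array([[1, 2], [4, 3]], dtype=np.uint16)
label = class_map[label - 1]  # same remap as the loop, without Python-level iteration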
def build_from_index(self, index, paths, dirs):
""" Build index from another index for indices given. """
if isinstance(paths, dict):
self._paths = dict((file, paths[file]) for file in index)
else:
self._paths = dict((file, paths[pos]) for pos, file in np.ndenumerate(index))
self.dirs = dirs
return index
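# Note that ndenumerate yields *tuple* positions such as (0,), so the
# non-dict branch above only works if paths supports tuple indexing (e.g., a
# NumPy array). A self-contained sketch with made-up file names:
import numpy as np

index = np.array(['a.csv', 'b.csv'])
paths = np.array(['/data/a.csv', '/data/b.csv'])
_paths = dict((file, paths[pos]) for pos, file in np.ndenumerate(index))
# {'a.csv': '/data/a.csv', 'b.csv': '/data/b.csv'}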
def analyze_false(validData, validDataNumbers, validLabels, model):
    """Compute mean sequence lengths of true-positive and false-negative
    class-1 predictions for the best model."""
predictions = np.squeeze((model.predict(validDataNumbers) > 0.5).astype('int32'))
c1_inds = np.where(validLabels == 1)[0]
pos_inds = np.where((predictions+validLabels) == 2)[0] #np.squeeze(predictions) == validLabels
neg_inds = np.setdiff1d(c1_inds,pos_inds)
seq_lengths = np.zeros((validData.shape[0]))
for ind,row in np.ndenumerate(validData):
seq_lengths[ind] = len(wordpunct_tokenize(row.lower().strip()))
mean_true_length = np.mean(seq_lengths[pos_inds])
mean_false_length = np.mean(seq_lengths[neg_inds])
return mean_false_length,mean_true_length
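# The token-counting loop in isolation, as a runnable sketch (NLTK assumed
# installed; the strings are made-up examples):
import numpy as np
from nltk.tokenize import wordpunct_tokenize

validData = np.array(["Hello, world!", "One two three"])
seq_lengths = np.zeros(validData.shape[0])
for ind, row in np.ndenumerate(validData):
    seq_lengths[ind] = len(wordpunct_tokenize(row.lower().strip()))
# seq_lengths == [4., 3.]  ("hello", ",", "world", "!")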
def rgb_pixeldata(self):
pixels = [np.ndarray(shape=[64, 128], dtype=np.dtype('u1'), order='C'),
np.ndarray(shape=[64, 128], dtype=np.dtype('u1'), order='C'),
np.ndarray(shape=[64, 128], dtype=np.dtype('u1'), order='C')]
for s in range(0, len(pixels)):
for (y, x), value in np.ndenumerate(pixels[s]):
if s == 0:
value = (x * 255) / 128
if s == 1:
value = (y * 255) / 64
if s == 2:
value = 255 - ((y * 255) / 64)
pixels[s].itemset((y, x), value)
return pixels
def grey_pixeldata(self):
pixels = np.ndarray(shape=[64, 128], dtype=np.dtype('f4'), order='C')
for (y, x), value in np.ndenumerate(pixels):
value = x/128.0 + y/64.0
pixels.itemset((y, x), value)
return pixels
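# In both methods above, ndenumerate only supplies the (y, x) indices; the
# enumerated value is discarded and recomputed. np.fromfunction is a common
# vectorized alternative for the greyscale ramp (a sketch, not the original):
import numpy as np

grey = np.fromfunction(lambda y, x: x / 128.0 + y / 64.0, (64, 128))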
def argmin_n(m, n):
best_values = []
best_index = []
max_value_heap = []
for index, value in np.ndenumerate(m):
if len(best_values) == n:
if -1 * value < max_value_heap[0][0]:
                # value is larger than the largest kept value
# and the list is at capacity
continue
_, pos = heapq.heappop(max_value_heap)
best_values[pos] = value
best_index[pos] = index
heapq.heappush(max_value_heap, (-1 * value, pos))
else:
heapq.heappush(max_value_heap, (-1 * value, len(best_values)))
best_values.append(value)
best_index.append(index)
pos, best_values = zip(*sorted(enumerate(best_values), key=lambda e: e[1]))
best_index = [best_index[i] for i in pos]
return best_index
def argmax_n(m, n):
best_values = []
best_index = []
max_value_heap = []
for index, value in np.ndenumerate(m):
if len(best_values) == n:
if value < max_value_heap[0][0]:
                # value is smaller than the smallest kept value
                # and the list is at capacity
continue
_, pos = heapq.heappop(max_value_heap)
best_values[pos] = value
best_index[pos] = index
heapq.heappush(max_value_heap, (value, pos))
else:
heapq.heappush(max_value_heap, (value, len(best_values)))
best_values.append(value)
best_index.append(index)
pos, best_values = zip(
*sorted(enumerate(best_values), key=lambda e: e[1], reverse=True))
best_index = [best_index[i] for i in pos]
return best_index
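# A quick self-contained check of both helpers (heapq and numpy are assumed
# to be imported at module level in the original source):
import heapq
import numpy as np

m = np.array([[5, 1],
              [9, 3]])
print(argmin_n(m, 2))  # [(0, 1), (1, 1)] -- indices of the two smallest values
print(argmax_n(m, 2))  # [(1, 0), (0, 0)] -- indices of the two largest values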
def __init__(self, tensors, up_label="up", right_label="right",
down_label="down", left_label="left",
copy_data=True):
self.up_label = up_label
self.right_label = right_label
self.down_label = down_label
self.left_label = left_label
if copy_data:
# Creates copies of tensors in memory
copied_tensors = []
for row in tensors:
copied_tensors.append([x.copy() for x in row])
self.data = np.array(copied_tensors)
else:
# This will not create copies of tensors in memory
# (just link to originals)
self.data = np.array(tensors)
# Every tensor will have four indices corresponding to
# "left", "right" and "up", "down" labels.
        for i, x in np.ndenumerate(self.data):
            if left_label not in x.labels:
                x.add_dummy_index(left_label)
            if right_label not in x.labels:
                x.add_dummy_index(right_label)
            if up_label not in x.labels:
                x.add_dummy_index(up_label)
            if down_label not in x.labels:
                x.add_dummy_index(down_label)
    def getCellParameters(self, array, fn=np.mean):
        """Apply fn to the region of array masked by each grid cell."""
out = np.arange(len(self.cells),
dtype=float).reshape(self.opts['grid'])
s = array.shape
for (i, j), n in np.ndenumerate(out):
m = self.cells[int(n)].getMask(s)
out[i, j] = fn(array[m])
return out
    def Find_HighlightedEdges(self, weight=0):
self.ThresholdData = np.copy(self.data)
# low_values_indices = self.ThresholdData < weight # Where values are low
# self.ThresholdData[low_values_indices] = 0
# graterindices = [ (i,j) for i,j in np.ndenumerate(self.ThresholdData) if any(i > j) ]
# self.ThresholdData[graterindices[:1]] = 0
# self.ThresholdData = np.tril(self.ThresholdData)
# print self.ThresholdData, "is the data same??"
"""
test 2 highlighted edges there
"""
# np.savetxt('test2.txt', self.ThresholdData, delimiter=',', fmt='%1.4e')
self.g = nx.from_numpy_matrix(self.ThresholdData)
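# Compatibility note: nx.from_numpy_matrix was removed in NetworkX 3.0; on
# current versions the equivalent construction is from_numpy_array, e.g.:
import networkx as nx
import numpy as np

g = nx.from_numpy_array(np.eye(3))  # same adjacency-matrix semantics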
def finite_differences(func, inputs, func_output_shape=(), epsilon=1e-5):
"""
Computes gradients via finite differences.
derivative = (func(x+epsilon) - func(x-epsilon)) / (2*epsilon)
Args:
func: Function to compute gradient of. Inputs and outputs can be
arbitrary dimension.
inputs: Vector value to compute gradient at.
func_output_shape: Shape of the output of func. Default is
empty-tuple, which works for scalar-valued functions.
epsilon: Difference to use for computing gradient.
Returns:
Gradient vector of each dimension of func with respect to each
dimension of input.
"""
gradient = np.zeros(inputs.shape+func_output_shape)
for idx, _ in np.ndenumerate(inputs):
test_input = np.copy(inputs)
test_input[idx] += epsilon
obj_d1 = func(test_input)
assert obj_d1.shape == func_output_shape
test_input = np.copy(inputs)
test_input[idx] -= epsilon
obj_d2 = func(test_input)
assert obj_d2.shape == func_output_shape
diff = (obj_d1 - obj_d2) / (2 * epsilon)
gradient[idx] += diff
return gradient
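# A quick sanity check against a function with a known gradient (this example
# is illustrative, not from the original source):
import numpy as np

x = np.array([1.0, 2.0, 3.0])
grad = finite_differences(lambda v: np.sum(v ** 2), x)
assert np.allclose(grad, 2 * x)  # analytic gradient of sum(v^2) is 2v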
def __init__(self, row, column):
self.rewards = np.full((row, column), -0.2)
        self.states = np.ones((row, column), dtype=int)
self.states[1, 1] = -1
self.index_list = [index for index, x in np.ndenumerate(self.states) if x > 0]
self._init_next_state_table()
self.rewards[0, column - 1] = 1
self.rewards[0, 0] = 1
self.rewards[1, column - 1] = -1
self.terminal = [(0, column - 1), (0, 0)]
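# The ndenumerate comprehension above collects the coordinates of every
# non-blocked state in row-major order; a standalone sketch of that idiom:
import numpy as np

states = np.ones((3, 4), dtype=int)
states[1, 1] = -1  # blocked cell
index_list = [index for index, x in np.ndenumerate(states) if x > 0]
# (1, 1) is absent; the other 11 grid coordinates appear in row-major order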