def dcg_at_k(r, k):
    """Discounted cumulative gain of relevance scores ``r`` truncated at rank ``k``.

    :param r: iterable of relevance scores in ranked order (best first)
    :param k: number of top results to score
    :returns: float DCG value; 0.0 for an empty ranking
    """
    # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
    # is the drop-in equivalent.
    r = np.asarray(r, dtype=float)[:k]
    # Standard DCG discount: rel_i / log2(i + 1) with 1-based ranks.
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))
# Collected example source code for numpy's asfarray()
def test_asfarray_none(self, level=rlevel):
    # Test for changeset r5065
    # Regression test: np.asfarray([None]) must yield [nan] rather than raise.
    # `level=rlevel` follows the old numpy.testing verbosity-level convention.
    assert_array_equal(np.array([np.nan]), np.asfarray([None]))
def next_batch(self, batch_size=10, start=-1, end=-1, nth=-1):
    """Return one (data, label) batch read from this dataset's CSV file.

    Line selection:
      * ``nth`` >= 0      -> exactly the nth line of the file
      * ``start``/``end`` -> the slice ``lines[start:end]``
      * otherwise         -> ``batch_size`` randomly sampled lines

    :returns: tuple ``(data, label)`` where ``data`` has shape
        (n, self._data_len) with float dtype and ``label`` is one-hot
        with shape (n, self._class_len).
    """
    with open(PACK_PATH+"/dataset/"+str(self._who_am_i)+".csv") as f:
        lines = f.readlines()
    if nth == -1:
        if (start == -1) and (end == -1):
            datas = random.sample(lines, batch_size)
        else:
            datas = lines[start:end]
    else:
        datas = [lines[nth]]
    rows = []
    onehots = []
    for d in datas:
        sv_data = d.split(',')
        tmp_label = sv_data[0]
        # The last CSV field is dropped (presumably a trailing
        # newline/terminator column -- TODO confirm against the writer).
        tmp_data = sv_data[1:len(sv_data)-1]
        # BUG FIX: the original appended a *string* ndarray, which silently
        # promoted the whole data matrix to a string dtype; parse as float.
        rows.append(np.asarray(tmp_data, dtype=float))
        onehots.append(np.eye(self._class_len)[int(float(tmp_label))])
    # Build the matrices once instead of O(n^2) np.append calls in the loop.
    if rows:
        data = np.vstack(rows)
        label = np.vstack(onehots)
    else:
        data = np.empty((0, self._data_len), float)
        label = np.empty((0, self._class_len), int)
    return data, label
def main1():
    """Embed image features in 2-D with t-SNE and plot the result."""
    #target_names = np.array(args.names)
    features, image_paths = getFeatureAndPath(args.image_dir)
    features = np.asfarray(features, dtype='float')
    # PCA pre-reduction is currently disabled; t-SNE runs on raw features.
    #X_pca = PCA(n_components=128).fit_transform(features)
    embedder = TSNE(n_components=2, init='random', random_state=0)
    embedded = embedder.fit_transform(features)
    imgPlot(embedded, image_paths)
def dcg_at_k(r, k):
    """Compute DCG@k for a ranked list of relevance scores.

    :param r: relevance scores in rank order
    :param k: rank cutoff
    :returns: float DCG; 0.0 when ``r`` is empty
    """
    # FIX: np.asfarray was removed in NumPy 2.0; use an explicit float dtype.
    r = np.asarray(r, dtype=float)[:k]
    # rel_i / log2(i + 1), i starting at 1.
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))
def dcg_at_k(r, k):
    """Discounted cumulative gain over the top ``k`` relevance scores ``r``.

    :param r: relevance scores, best-ranked first
    :param k: cutoff rank
    :returns: float DCG (0.0 for empty input)
    """
    # FIX: replace np.asfarray (removed in NumPy 2.0) with its equivalent.
    r = np.asarray(r, dtype=float)[:k]
    # Each score is discounted by log2 of its 1-based rank plus one.
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))
def recall_at_k(r, k, all_pos_num):
    """Recall of the top-``k`` ranked results.

    :param r: binary relevance indicators in rank order
    :param k: cutoff rank
    :param all_pos_num: total number of relevant items; must be non-zero
    :returns: float recall in [0, 1]
    """
    # FIX: np.asfarray was removed in NumPy 2.0; use asarray with float dtype.
    r = np.asarray(r, dtype=float)[:k]
    return np.sum(r) / all_pos_num
def __init__(self, ci, cn, transf):
    """Perceptron-style layer: ``cn`` neurons fully connected to ``ci`` inputs.

    :param ci: number of inputs
    :param cn: number of neurons
    :param transf: activation function; if it lacks ``out_minmax`` the
        output range is probed numerically over a wide sample of inputs.
    """
    Layer.__init__(self, ci, cn, cn, {'w': (cn, ci), 'b': cn})
    self.transf = transf
    if not hasattr(transf, 'out_minmax'):
        # BUG FIX: `np.asfarry` is a typo (AttributeError at runtime).
        # np.asfarray itself was removed in NumPy 2.0, so use asarray.
        test = np.asarray([-1e100, -100, -10, -1, 0, 1, 10, 100, 1e100],
                          dtype=float)
        val = self.transf(test)
        self.out_minmax = np.array([val.min(), val.max()] * self.co)
    else:
        self.out_minmax = np.asarray([transf.out_minmax] * self.co,
                                     dtype=float)
    # default init function
    self.initf = init.initwb_reg
    #self.initf = init.initwb_nw
    self.s = np.zeros(self.cn)
def __init__(self, ci, cn, transf, max_iter, delta):
    """Recurrent layer with ``cn`` neurons and ``ci`` inputs.

    :param ci: number of inputs
    :param cn: number of neurons
    :param transf: activation function; ``out_minmax`` is probed if absent
    :param max_iter: maximum number of recurrent iterations
    :param delta: minimum output change that keeps the recurrence running
    """
    Layer.__init__(self, ci, cn, cn, {'w': (cn, ci), 'b': cn})
    self.max_iter = max_iter
    self.delta = delta
    self.transf = transf
    self.outs = []
    if not hasattr(transf, 'out_minmax'):
        # BUG FIX: `np.asfarry` is a typo (AttributeError at runtime).
        # np.asfarray itself was removed in NumPy 2.0, so use asarray.
        test = np.asarray([-1e100, -100, -10, -1, 0, 1, 10, 100, 1e100],
                          dtype=float)
        val = self.transf(test)
        self.out_minmax = np.array([val.min(), val.max()] * self.co)
    else:
        self.out_minmax = np.asarray([transf.out_minmax] * self.co,
                                     dtype=float)
    self.initf = None
    self.s = np.zeros(self.cn)
def newlvq(minmax, cn0, pc):
    """
    Create a learning vector quantization (LVQ) network

    :Parameters:
        minmax: list of list, the outer list is the number of input neurons,
            inner lists must contain 2 elements: min and max
            Range of input value
        cn0: int
            Number of neurons in input layer
        pc: list
            List of percent, sum(pc) == 1
    :Returns:
        net: Net
    :Example:
        >>> # create network with 2 inputs,
        >>> # 2 layers and 10 neurons in each layer
        >>> net = newlvq([[-1, 1], [-1, 1]], 10, [0.6, 0.4])
    """
    pc = np.asarray(pc, dtype=float)  # np.asfarray removed in NumPy 2.0
    # BUG FIX: exact float equality (sum(pc) == 1) is fragile for values
    # such as [0.3, 0.3, 0.4]; compare with a tolerance instead.
    assert np.isclose(pc.sum(), 1.0)
    ci = len(minmax)
    cn1 = len(pc)
    assert cn0 > cn1
    layer_inp = layer.Competitive(ci, cn0)
    layer_out = layer.Perceptron(cn0, cn1, trans.PureLin())
    layer_out.initf = None
    layer_out.np['b'].fill(0.0)
    layer_out.np['w'].fill(0.0)
    # BUG FIX: np.floor returns floats, and float slice indices raise
    # TypeError on modern Python/NumPy -- cast to int explicitly.
    inx = np.floor(cn0 * pc.cumsum()).astype(int)
    # Each output neuron n is wired (weight 1.0) to its contiguous share
    # of competitive-layer neurons, proportioned by pc.
    for n, i in enumerate(inx):
        st = 0 if n == 0 else inx[n - 1]
        layer_out.np['w'][n][st:i].fill(1.0)
    net = Net(minmax, cn1, [layer_inp, layer_out],
              [[-1], [0], [1]], train.train_lvq, error.MSE())
    return net
def __init__(self, x):
    """Min-max normalizer fitted on a 2-D sample array.

    Stores the per-column minimum (``self.min``) and range (``self.dist``)
    so samples can be mapped into [0, 1] and back (see ``renorm``).

    :param x: array-like of shape (n_samples, n_features)
    :raises ValueError: if ``x`` is not 2-dimensional
    """
    x = np.asarray(x, dtype=float)  # np.asfarray removed in NumPy 2.0
    if x.ndim != 2:
        # BUG FIX: typo "mast" -> "must" in the error message.
        raise ValueError('x must have 2 dimensions')
    # Renamed locals: `min` shadowed the builtin of the same name.
    col_min = np.min(x, axis=0)
    col_range = np.max(x, axis=0) - col_min
    # Shape (1, n_features) so arithmetic broadcasts over sample batches.
    col_min.shape = 1, col_min.size
    col_range.shape = 1, col_range.size
    self.min = col_min
    self.dist = col_range
def __call__(self, x):
    """Normalize samples ``x`` per feature: (x - min) / range."""
    x = np.asarray(x, dtype=float)  # np.asfarray removed in NumPy 2.0
    res = (x - self.min) / self.dist
    return res
def renorm(self, x):
    """Inverse of ``__call__``: map normalized values back to original scale."""
    x = np.asarray(x, dtype=float)  # np.asfarray removed in NumPy 2.0
    res = x * self.dist + self.min
    return res
#------------------------------------------------------------
def step(self, inp):
    """
    Simulated step
    :Parameters:
        inp: array like
            Input vector
    :Returns:
        out: array
            Output vector
    """
    #TODO: self.inp=np.asfarray(inp)?
    self.inp = inp
    # self.connect lists, for each slot, the source indices feeding it;
    # index -1 denotes the raw network input rather than a layer.
    for nl, nums in enumerate(self.connect):
        if len(nums) > 1:
            # Several sources: concatenate their outputs into one signal.
            signal = []
            for ns in nums:
                s = self.layers[ns].out if ns != -1 else inp
                signal.append(s)
            signal = np.concatenate(signal)
        else:
            ns = nums[0]
            signal = self.layers[ns].out if ns != -1 else inp
        # NOTE(review): presumably the last connect entry is the network's
        # output tap, not a real layer, hence the bounds guard -- confirm
        # against the Net constructor.
        if nl != len(self.layers):
            self.layers[nl].step(signal)
    # `signal` after the loop is what fed the output slot.
    self.out = signal
    return self.out
def test_asfarray_none(self, level=rlevel):
    # Test for changeset r5065
    # Regression: converting [None] with np.asfarray should produce [nan]
    # instead of raising.
    assert_array_equal(np.array([np.nan]), np.asfarray([None]))
def SNR(img1, img2=None, bg=None,
        noise_level_function=None,
        constant_noise_level=False,
        imgs_to_be_averaged=False):
    '''
    Returns a signal-to-noise-map
    uses algorithm as described in BEDRICH 2016 JPV (not yet published)

    :param img1: first image (array-like)
    :param img2: optional second image of the same scene; if given, the
        signal is the average of both and noise can use their difference
    :param bg: optional background / dark-current level to subtract;
        estimated from img1 when omitted
    :param noise_level_function: optional callable mapping signal -> noise
    :param constant_noise_level: True, to assume noise to be constant
    :param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
    '''
    # dark current subtraction:
    img1 = np.asarray(img1, dtype=float)  # np.asfarray removed in NumPy 2.0
    if bg is not None:
        img1 = img1 - bg
    # SIGNAL:
    if img2 is not None:
        img2_exists = True
        img2 = np.asarray(img2, dtype=float)
        # BUG FIX: the original computed `img2 - bg` unconditionally, which
        # raises TypeError when bg is None; subtract only when bg is given.
        if bg is not None:
            img2 = img2 - bg
        # signal as average on both images
        signal = 0.5 * (img1 + img2)
    else:
        img2_exists = False
        signal = img1
    # denoise:
    signal = median_filter(signal, 3)
    # NOISE
    if constant_noise_level:
        # CONSTANT NOISE
        if img2_exists:
            d = img1 - img2
            # 0.5**0.5 because of sum of variances
            noise = 0.5**0.5 * np.mean(np.abs((d))) * F_RMS2AAD
        else:
            d = (img1 - signal) * F_NOISE_WITH_MEDIAN
            noise = np.mean(np.abs(d)) * F_RMS2AAD
    else:
        # NOISE LEVEL FUNCTION
        if noise_level_function is None:
            noise_level_function, _ = oneImageNLF(img1, img2, signal)
        noise = noise_level_function(signal)
        noise[noise < 1] = 1  # otherwise SNR could be higher than image value
    if imgs_to_be_averaged:
        # SNR will be higher if both given images are supposed to be averaged:
        # factor of noise reduction if SNR is for average(img1, img2):
        noise *= 0.5**0.5
    # BACKGROUND estimation and removal if background not given:
    if bg is None:
        bg = getBackgroundLevel(img1)
        signal -= bg
    snr = signal / noise
    # limit to 1, saying at these points signal=noise:
    snr[snr < 1] = 1
    return snr
def SNRaverage(snr, method='average', excludeBackground=True,
               checkBackground=True,
               backgroundLevel=None):
    '''
    average a signal-to-noise map

    :param method: ['average','X75', 'RMS', 'median'] - X75: this SNR will be exceeded by 75% of the signal
    :type method: str
    :param checkBackground: check whether there is actually a background level to exclude
    :type checkBackground: bool
    :returns: averaged SNR as float
    :raises NotImplementedError: if ``method`` is not one of the above
    '''
    if excludeBackground:
        # get background level
        if backgroundLevel is None:
            try:
                f = FitHistogramPeaks(snr).fitParams
                if checkBackground:
                    if not hasBackground(f):
                        excludeBackground = False
                if excludeBackground:
                    backgroundLevel = getSignalMinimum(f)
            except (ValueError, AssertionError):
                # Best effort: treat everything as signal if fitting fails.
                backgroundLevel = snr.min()
        if excludeBackground:
            snr = snr[snr >= backgroundLevel]
    if method == 'RMS':
        avg = (snr**2).mean()**0.5
    elif method == 'average':
        avg = snr.mean()
#         if np.isnan(avg):
#             avg = np.nanmean(snr)
    elif method == 'median':
        avg = np.median(snr)
#         if np.isnan(avg):
#             avg = np.nanmedian(snr)
    elif method == 'X75':
        r = (snr.min(), snr.max())
        hist, bin_edges = np.histogram(snr, bins=2 * int(r[1] - r[0]), range=r)
        # np.asfarray was removed in NumPy 2.0; normalize to a probability mass.
        hist = np.asarray(hist, dtype=float) / hist.sum()
        cdf = np.cumsum(hist)
        # 25th percentile -> value exceeded by 75% of the signal.
        i = np.argmax(cdf > 0.25)
        avg = bin_edges[i]
    else:
        # BUG FIX: `raise NotImplemented(...)` raised a TypeError because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError("given SNR average doesn't exist")
    return avg
def imagelist_to_dataset(image_dir, image_lists, imsize=28):
    """Convert categorized image lists into numpy train/test datasets.

    Reads every listed image with cv2, resizes it to (imsize, imsize), saves
    origin/resize/gray copies under "dataset/", and collects images plus
    integer labels parsed from the category directory name.

    Returns ``((x_train, t_train), (x_test, t_test), num_classes)``.
    NOTE(review): validation arrays (x_valid, t_valid) are built but never
    returned -- confirm whether that is intentional.
    """
    master_key, sub_key = key_from_dictionary(image_lists)
    print("\n***** Make image list *****")
    result_dir = "dataset/"
    # Recreate the output directory from scratch on every run.
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    else:
        shutil.rmtree(result_dir)
        os.makedirs(result_dir)
    x_train = []
    t_train = np.empty((0), int)
    x_test = []
    t_test = np.empty((0), int)
    x_valid = []
    t_valid = np.empty((0), int)
    # key_i indexes sub_key: 0 -> train, 1 -> test, 3 -> valid.
    # NOTE(review): index 2 is skipped; presumably another sub-key that is
    # not a split list -- confirm against key_from_dictionary.
    for key_i in [0, 1, 3]:
        if key_i == 0:
            result_name = "train"
        elif key_i == 1:
            result_name = "test"
        else:
            result_name = "valid"
        sys.stdout.write(" Make \'"+result_name+" list\'...")
        # m: class
        for m in master_key:
            for i in range(len(image_lists[m][sub_key[key_i]])):
                # m: category
                # image_lists[m][sub_key[key_i]][i]: image name
                image_path = "./"+image_dir+"/"+m+"/"+image_lists[m][sub_key[key_i]][i]
                # Read jpg images and resizing it.
                origin_image = cv2.imread(image_path)
                resized_image = cv2.resize(origin_image, (imsize, imsize))
                grayscale_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
                image_save(result_dir+"origin/"+result_name+"/", image_lists[m][sub_key[key_i]][i], origin_image)
                image_save(result_dir+"resize/"+result_name+"/", image_lists[m][sub_key[key_i]][i], resized_image)
                image_save(result_dir+"gray/"+result_name+"/", image_lists[m][sub_key[key_i]][i], grayscale_image)
                # Label: directory name m parsed as a number -- assumes
                # class folders are named numerically (TODO confirm).
                if key_i == 0:
                    x_train.append(resized_image)
                    t_train = np.append(t_train, np.array([int(np.asfarray(m))]), axis=0)
                elif key_i == 1:
                    x_test.append(resized_image)
                    t_test = np.append(t_test, np.array([int(np.asfarray(m))]), axis=0)
                else:
                    x_valid.append(resized_image)
                    t_valid = np.append(t_valid, np.array([int(np.asfarray(m))]), axis=0)
        print(" complete.")
    #print(" x_train shape: " + str(np.array(x_train).shape))
    #print(" t_train shape: " + str(np.array(t_train).shape))
    #print(" x_test shape: " + str(np.array(x_test).shape))
    #print(" t_test shape: " + str(np.array(t_test).shape))
    x_train = np.asarray(x_train)
    t_train = np.asarray(t_train)
    x_test = np.asarray(x_test)
    t_test = np.asarray(t_test)
    return (x_train, t_train), (x_test, t_test), len(master_key)
def newhop(target, transf=None, max_init=10, delta=0):
    """
    Create a Hopfield recurrent network

    :Parameters:
        target: array like (l x net.co)
            train target patterns
        transf: func (default HardLims)
            Activation function
        max_init: int (default 10)
            Maximum of recurrent iterations
        delta: float (default 0)
            Minimum difference between 2 outputs for stop recurrent cycle
    :Returns:
        net: Net
    :Example:
        >>> net = newhop([[-1, -1, -1], [1, -1, 1]])
        >>> output = net.sim([[-1, 1, -1], [1, -1, 1]])
    """
    # (docstring example previously called newhem() by mistake)
    target = np.asarray(target, dtype=float)  # np.asfarray removed in NumPy 2.0
    assert target.ndim == 2
    ci = len(target[0])
    if transf is None:
        transf = trans.HardLims()
    l = layer.Reccurent(ci, ci, transf, max_init, delta)
    w = l.np['w']
    b = l.np['b']
    # init weight: Hebbian outer-product rule with a zeroed diagonal
    # (no self-connections) and zero biases.
    for i in range(ci):
        for j in range(ci):
            if i == j:
                w[i, j] = 0.0
            else:
                w[i, j] = np.sum(target[:, i] * target[:, j]) / ci
        b[i] = 0.0
    l.initf = None
    minmax = transf.out_minmax if hasattr(transf, 'out_minmax') else [-1, 1]
    net = Net([minmax] * ci, ci, [l], [[-1], [0]], None, None)
    return net
def newhem(target, transf=None, max_iter=10, delta=0):
    """
    Create a Hemming recurrent network with 2 layers

    :Parameters:
        target: array like (l x net.co)
            train target patterns
        transf: func (default SatLinPrm(0.1, 0, 10))
            Activation function of input layer
        max_iter: int (default 10)
            Maximum of recurrent iterations
        delta: float (default 0)
            Minimum difference between 2 outputs for stop recurrent cycle
    :Returns:
        net: Net
    :Example:
        >>> net = newhem([[-1, -1, -1], [1, -1, 1]])
        >>> output = net.sim([[-1, 1, -1], [1, -1, 1]])
    """
    # (docstring fixes: example previously called newhop(); the max_iter
    # parameter was documented under the wrong name "max_init")
    target = np.asarray(target, dtype=float)  # np.asfarray removed in NumPy 2.0
    assert target.ndim == 2
    cn = target.shape[0]
    ci = target.shape[1]
    if transf is None:
        transf = trans.SatLinPrm(0.1, 0, 10)
    layer_inp = layer.Perceptron(ci, cn, transf)
    # init input layer: each neuron matches one stored pattern.
    layer_inp.initf = None
    layer_inp.np['b'][:] = float(ci) / 2
    for i, tar in enumerate(target):
        layer_inp.np['w'][i][:] = tar / 2
    layer_out = layer.Reccurent(cn, cn, trans.SatLinPrm(1, 0, 1e6), max_iter, delta)
    # init output layer: winner-take-all via small mutual inhibition (eps)
    # and unit self-excitation.
    layer_out.initf = None
    layer_out.np['b'][:] = 0
    eps = - 1.0 / cn
    for i in range(cn):
        layer_out.np['w'][i][:] = [eps] * cn
        layer_out.np['w'][i][i] = 1
    # create network
    minmax = [[-1, 1]] * ci
    layers = [layer_inp, layer_out]
    connect = [[-1], [0], [1]]
    net = Net(minmax, cn, layers, connect, None, None)
    return net