import scipy as sp  # older SciPy, where sp.array/sp.isnan/sp.inf alias NumPy
import scipy.ndimage


def with_walking(time_arr, mins_per_square=1.3, transfer_constant=5):
    arr = time_arr.copy()
    # 4-connected and diagonal neighbourhoods; the centre is included so a
    # square can keep its own value.
    cross_footprint = sp.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]).astype(bool)
    diag_footprint = sp.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]).astype(bool)
    arr[sp.isnan(arr)] = sp.inf  # unreachable squares start at infinity
    for _ in range(60):
        # Cheapest time reachable with one horizontal/vertical step...
        cross_arr = sp.ndimage.minimum_filter(arr, footprint=cross_footprint)
        cross_changes = (cross_arr != arr)
        cross_arr[cross_changes] += 1 * mins_per_square
        # ...and with one diagonal step (sqrt(2) is roughly 1.4 squares).
        diag_arr = sp.ndimage.minimum_filter(arr, footprint=diag_footprint)
        diag_changes = (diag_arr != arr)
        diag_arr[diag_changes] += 1.4 * mins_per_square
        # Keep the old value where stepping is not an improvement; without
        # this, a square whose neighbour is only slightly cheaper could be
        # overwritten with a worse time.
        arr = sp.minimum(arr, sp.minimum(cross_arr, diag_arr))
    arr[sp.isinf(arr)] = sp.nan  # still unreachable after 60 relaxations
    return arr + transfer_constant
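A hypothetical run on a small grid: NaN marks squares with no transit service, and the relaxation fills them in with the cheapest walk from a served square (plus the transfer constant):

import numpy as np

times = np.array([[5.0,    np.nan, np.nan],
                  [np.nan, np.nan, np.nan],
                  [np.nan, np.nan, 12.0]])
print(with_walking(times))
# e.g. the centre square becomes min(5, 12) + 1.4*1.3 + 5 = 11.82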
Python minimum() example source code

The snippets below assume "import numpy as np" and "import scipy as sp". They date from when SciPy re-exported NumPy functions such as sp.minimum, sp.maximum, and sp.log; those aliases were later deprecated and removed, so substitute the NumPy equivalents on modern installs.
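For context, a minimal sketch of what minimum() itself does (elementwise, with broadcasting):

import numpy as np

print(np.minimum([1, 5, 3], [4, 2, 3]))  # [1 2 3]
# The clamping idiom that recurs throughout this page:
eps = 1e-15
print(np.minimum(1 - eps, np.maximum(eps, 1.2)))  # clips 1.2 down to 1 - eps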
def log_loss(solution, prediction, task='binary.classification'):
    '''Log loss for binary and multiclass.

    mvmean and binarize_predictions are helpers defined elsewhere in the
    same scoring module.
    '''
    [sample_num, label_num] = solution.shape
    eps = 1e-15
    pred = np.copy(prediction)  # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num > 1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class
        # classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the
        # multi-label case
    # Bound the predictions to avoid log(0), 1/0, ...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss
    pos_class_log_loss = -mvmean(sol * np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num == 1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = -mvmean((1 - sol) * np.log(1 - pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem; in the multi-label case the
        # right thing is to AVERAGE, not sum (the probabilities in one line
        # do not add up to one). We return all the per-column scores so we
        # can normalize correctly later on.
    else:
        # For the multiclass case the probabilities in one line add up to one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss)
    return log_loss
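A hypothetical binary run. mvmean is not shown on this page; np.mean is a reasonable stand-in for well-behaved inputs:

import numpy as np

mvmean = np.mean  # stand-in for the scoring module's own mean helper

solution = np.array([[1.0], [0.0], [1.0]])    # one binary column of labels
prediction = np.array([[0.9], [0.2], [0.6]])  # predicted probabilities
print(log_loss(solution, prediction))
# -> [0.2798]: -(ln 0.9 + ln 0.8 + ln 0.6) / 3, one score per column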
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1 - epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0 / len(y)
    return res
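A quick sanity check on the same hypothetical values as above:

import numpy as np

p = np.array([0.9, 0.2, 0.6])  # predicted probabilities
y = np.array([1, 0, 1])        # true labels
print(binary_logloss(p, y))    # -(ln 0.9 + ln 0.8 + ln 0.6) / 3 ≈ 0.2798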
import os

import digital_rf as drf


def open_file(maindir):
    """
    Create the digital RF reading object.

    Args:
        maindir (str): The directory where the data is located.

    Returns:
        drfObj (DigitalRFReader): Digital RF reader object.
        chandict (dict): Dictionary that holds info for the channels.
        start_indx (int): Start index in samples.
        end_indx (int): End index in samples.
    """
    mainpath = os.path.expanduser(maindir)
    drfObj = drf.DigitalRFReader(mainpath)
    chans = drfObj.get_channels()
    chandict = {}
    start_indx, end_indx = [0, sp.inf]
    # Get channel info
    for ichan in chans:
        curdict = {}
        curdict['sind'], curdict['eind'] = drfObj.get_bounds(ichan)
        # Determine the read boundaries, assuming the sampling is the same.
        start_indx = sp.maximum(curdict['sind'], start_indx)
        end_indx = sp.minimum(curdict['eind'], end_indx)
        dmetadict = drfObj.read_metadata(start_indx, end_indx, ichan)
        dmetakeys = list(dmetadict.keys())  # list() so it is indexable on Python 3
        curdict['sps'] = dmetadict[dmetakeys[0]]['samples_per_second']
        curdict['fo'] = dmetadict[dmetakeys[0]]['center_frequencies'].ravel()[0]
        chandict[ichan] = curdict
    return (drfObj, chandict, start_indx, end_indx)
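The bounds intersection is where maximum()/minimum() earn their keep: the samples common to every channel run from the latest start to the earliest end. The same logic standalone, with hypothetical bounds:

import numpy as np

bounds = [(100, 900), (150, 800), (120, 950)]  # (start, end) per channel
start_indx, end_indx = 0, np.inf
for sind, eind in bounds:
    start_indx = np.maximum(sind, start_indx)  # latest start wins
    end_indx = np.minimum(eind, end_indx)      # earliest end wins
print(start_indx, end_indx)  # 150 800.0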
def _computeBGDiff(self):
    prevImg = self._imageBuffer[0].asMatrix2D()
    curImg = self._imageBuffer.getMiddle().asMatrix2D()
    nextImg = self._imageBuffer[-1].asMatrix2D()
    delta1 = sp.absolute(curImg - prevImg)  # frame diff 1
    delta2 = sp.absolute(nextImg - curImg)  # frame diff 2
    # Use the element-wise minimum of the two difference images, which is
    # what gets compared to the threshold to yield the foreground mask.
    return sp.minimum(delta1, delta2)
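The same double-difference trick on plain arrays (hypothetical frames and threshold): a pixel counts as foreground only if it differs from both the previous and the next frame, so the minimum suppresses changes that appear in just one diff:

import numpy as np

prev_img = np.zeros((3, 3))
cur_img = np.zeros((3, 3))
next_img = np.zeros((3, 3))
cur_img[1, 1] = 1.0   # object present only in the middle frame
next_img[0, 0] = 1.0  # noise that appears in only one frame

bg_diff = np.minimum(np.abs(cur_img - prev_img), np.abs(next_img - cur_img))
print(bg_diff > 0.5)  # True only at (1, 1); the one-frame blip at (0, 0) is gone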
def loglossl(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
def my_logloss(act, pred):
    # K is the Keras backend (from keras import backend as K).
    epsilon = 1e-15
    pred = K.maximum(epsilon, pred)
    pred = K.minimum(1 - epsilon, pred)
    ll = K.sum(act * K.log(pred) + (1 - act) * K.log(1 - pred))
    # Cast the length to float: the TensorFlow backend will not divide a
    # float tensor by an integer shape element.
    ll = ll * -1.0 / K.cast(K.shape(act)[0], K.floatx())
    return ll
def logloss(act, pred):
    '''
    Binary log loss.
    :param act: ground-truth labels
    :param pred: predicted probabilities
    :return: the mean negative log-likelihood
    '''
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
def log_loss(solution, prediction, task=BINARY_CLASSIFICATION):
    """Log loss for binary and multiclass.

    BINARY_CLASSIFICATION and MULTICLASS_CLASSIFICATION are module-level
    task constants; binarize_predictions is a helper from the same module.
    """
    [sample_num, label_num] = solution.shape
    eps = 1e-15
    pred = np.copy(prediction)  # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == MULTICLASS_CLASSIFICATION) and (label_num > 1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class
        # classification
        sol = binarize_predictions(solution, task=MULTICLASS_CLASSIFICATION)
        # For the base prediction, this solution is ridiculous in the
        # multi-label case
    # Bound the predictions to avoid log(0), 1/0, ...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss
    pos_class_log_loss = -np.mean(sol * np.log(pred), axis=0)
    if (task != MULTICLASS_CLASSIFICATION) or (label_num == 1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = -np.mean((1 - sol) * np.log(1 - pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem; in the multi-label case the
        # right thing is to AVERAGE, not sum (the probabilities in one line
        # do not add up to one). We return all the per-column scores so we
        # can normalize correctly later on.
    else:
        # For the multiclass case the probabilities in one line add up to one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss)
    return log_loss
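A hypothetical multiclass run. The task constants normally live at module level, and binarize_predictions is defined alongside log_loss; both are loudly stubbed here so the sketch is self-contained:

import numpy as np

BINARY_CLASSIFICATION = 'binary.classification'          # stand-in constant
MULTICLASS_CLASSIFICATION = 'multiclass.classification'  # stand-in constant

def binarize_predictions(solution, task):
    # Stand-in: one-hot the per-row maximum, approximating the real helper.
    return (solution == solution.max(axis=1, keepdims=True)).astype(float)

sol = np.array([[1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0]])
pred = np.array([[0.7, 0.2, 0.1],
                 [0.1, 0.3, 0.6]])
print(log_loss(sol, pred, task=MULTICLASS_CLASSIFICATION))
# -(ln 0.7 + ln 0.6) / 2 ≈ 0.434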
def logloss(act, preds):
    epsilon = 1e-15
    preds = sp.maximum(epsilon, preds)
    preds = sp.minimum(1 - epsilon, preds)
    ll = sum(act * sp.log(preds) + sp.subtract(1, act) * sp.log(sp.subtract(1, preds)))
    ll = ll * -1.0 / len(act)
    return ll
def nms(dets, proba, T):
    dets = dets.astype("float")
    if len(dets) == 0:
        return []
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = proba
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the top-scoring box with every remaining box
        xx1 = sp.maximum(x1[i], x1[order[1:]])
        yy1 = sp.maximum(y1[i], y1[order[1:]])
        xx2 = sp.minimum(x2[i], x2[order[1:]])
        yy2 = sp.minimum(y2[i], y2[order[1:]])
        w = sp.maximum(0.0, xx2 - xx1 + 1)
        h = sp.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)  # IoU
        # Keep only boxes whose overlap with the winner is at most T
        inds = sp.where(ovr <= T)[0]
        order = order[inds + 1]
    return keep
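A hypothetical check: two heavily overlapping boxes and one clear of them, each box as [x1, y1, x2, y2]:

import numpy as np

dets = np.array([[10, 10, 50, 50],
                 [12, 12, 52, 52],
                 [100, 100, 140, 140]])
proba = np.array([0.9, 0.8, 0.7])
print(nms(dets, proba, T=0.5))  # boxes 0 and 2 survive; box 1 (IoU ≈ 0.83) is suppressed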
From utils.py in project Tencent_Social_Advertising_Algorithm_Competition (author: guicunbin):
def self_eval(pred, train_data):
    '''
    :param pred: predicted probabilities
    :param train_data: lgb Dataset (or the raw labels themselves)
    '''
    try:
        labels = train_data.get_label()
    except AttributeError:
        labels = train_data
    epsilon = 1e-15
    pred = np.maximum(epsilon, pred)
    pred = np.minimum(1 - epsilon, pred)
    ll = sum(labels * np.log(pred) + (1 - labels) * np.log(1 - pred))
    ll = ll * (-1.0) / len(labels)
    return 'log loss', ll, False
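A hedged sketch of plugging this into LightGBM as a custom metric (hypothetical data; the (name, value, is_higher_better) return tuple is what the feval hook expects):

import numpy as np
import lightgbm as lgb

X = np.random.rand(200, 5)
y = (X[:, 0] > 0.5).astype(int)
train = lgb.Dataset(X, label=y)
booster = lgb.train({'objective': 'binary', 'verbose': -1}, train,
                    num_boost_round=10, valid_sets=[train], feval=self_eval)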