def _process_input(self, x):
    """Apply per-anchor activations to a raw prediction tensor.

    Channel layout per anchor box (starting at ``self._entry_index(i, 0)``):
    logistic on the x/y offsets, raw w/h, logistic on objectness (skipped
    when ``self.background`` is set), and a channel-wise softmax over the
    class scores.

    Args:
        x: 4-D tensor, channels-last (``(batch, dim1, dim2, channels)``).

    Returns:
        Tensor of the same shape as ``x`` with activations applied.
    """
    def sigmoid(t):
        # Elementwise logistic, built from the backend's exp.
        return 1.0 / (1.0 + K.exp(-t))

    _batch, _d1, _d2, _channels = x.get_shape()
    # Move channels first so K.gather can slice individual channels.
    features = K.permute_dimensions(x, (3, 0, 1, 2))

    pieces = []
    for anchor in range(self.num):
        base = self._entry_index(anchor, 0)
        # x/y offsets -> logistic; w/h left raw.
        pieces.append(sigmoid(K.gather(features, (base, base + 1))))
        pieces.append(K.gather(features, (base + 2, base + 3)))
        # Objectness channel: passed through unchanged in background mode.
        objectness = K.gather(features, (base + 4,))
        pieces.append(objectness if self.background else sigmoid(objectness))
        # NOTE(review): start offset 5 is hardcoded while the end uses
        # self.coords — these only agree when coords == 4; confirm upstream.
        class_channels = tuple(range(base + 5, base + self.coords + self.classes + 1))
        pieces.append(softmax(K.gather(features, class_channels), axis=0))

    merged = K.concatenate(pieces, axis=0)
    # Restore the original channels-last layout.
    return K.permute_dimensions(merged, (1, 2, 3, 0))
# NOTE(review): removed trailing blog-navigation text ("评论列表" / "文章目录",
# i.e. "comment list" / "article table of contents") — a web-scraping artifact
# that was not Python code and would have been a syntax error.