def forward(self, input):
    conv1 = self.conv1(input)
    relu1 = self.relu1(conv1)
    conv2 = self.conv2(relu1)
    bn2 = self.bn2(conv2)
    relu2 = self.relu2(bn2)
    conv3 = self.conv3(relu2)
    bn3 = self.bn3(conv3)
    relu3 = self.relu3(bn3)
    conv4 = self.conv4(relu3)
    bn4 = self.bn4(conv4)
    relu4 = self.relu4(bn4)
    conv5 = self.conv5(relu4)
    return torch.sigmoid(conv5), [relu2, relu3, relu4]
def forward(self, char, word):
    self.models.eval()
    outs = []
    for ii, model in enumerate(self.models):
        if model.opt.type_ == 'char':
            out = t.sigmoid(model(*char))
        else:
            out = t.sigmoid(model(*word))
        outs.append(out.detach())
    for ii, model in enumerate(self.new_model):
        if model.opt.type_ == 'char':
            out = t.sigmoid(model(*char))
        else:
            out = t.sigmoid(model(*word))
        outs.append(out)
    return sum(outs) / len(outs)
def _generate_pred_bbox(self, bbox_delta, anchors):
"""get predictions boxes from bbox_delta and anchors.
Args:
bbox_delta: (dcx, dcy, dw, dh)
shape:(H*W*num_anchor, 4)
anchor: (cx, cy, h, w)
shape:(H*W*num_anchor, 4)
Output:
output: (x_min, y_min, x_max, y_max)
"""
assert bbox_delta.dim() == anchors.dim(), "dim is not equal"
pred_xy = torch.sigmoid(bbox_delta[:, :2]) + anchors[:, :2]
pred_wh = torch.exp(bbox_delta[:, 2:]) * anchors[:, 2:]
pred_bbox = torch.cat((pred_xy, pred_wh), dim=1).contiguous()
    # change (cx, cy, h, w) to (x_min, y_min, x_max, y_max)
pred_bbox[:, 0:2] = pred_bbox[:, 0:2] - pred_bbox[:, 2:4] / 2
pred_bbox[:, 2:4] = pred_bbox[:, 0:2] + pred_bbox[:, 2:4]
pred_bbox[:, 0::2] = pred_bbox[:, 0::2] / self.W
pred_bbox[:, 1::2] = pred_bbox[:, 1::2] / self.H
return pred_bbox
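
For reference, the same decoding can be exercised standalone on dummy tensors; decode_boxes, the grid size, and the anchor count below are illustrative assumptions, not part of the surrounding class.

import torch

def decode_boxes(bbox_delta, anchors, W, H):
    # hypothetical standalone sketch of the decoding above
    pred_xy = torch.sigmoid(bbox_delta[:, :2]) + anchors[:, :2]  # grid-relative centers
    pred_wh = torch.exp(bbox_delta[:, 2:]) * anchors[:, 2:]      # scaled sizes
    boxes = torch.cat((pred_xy - pred_wh / 2, pred_xy + pred_wh / 2), dim=1)
    boxes[:, 0::2] /= W  # normalize x coordinates by the grid width
    boxes[:, 1::2] /= H  # normalize y coordinates by the grid height
    return boxes

# smoke test: a 13x13 grid with 5 anchors per cell
boxes = decode_boxes(torch.randn(13 * 13 * 5, 4), torch.rand(13 * 13 * 5, 4), W=13, H=13)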
def _step(self, H_t, T_t, C_t, h0, h_mask, t_mask, c_mask):
s_lm1, rnns = h0, [self.rnn_h, self.rnn_t, self.rnn_c]
for l, (rnn_h, rnn_t, rnn_c) in enumerate(zip(*rnns)):
s_lm1_H = h_mask.expand_as(s_lm1) * s_lm1
s_lm1_T = t_mask.expand_as(s_lm1) * s_lm1
s_lm1_C = c_mask.expand_as(s_lm1) * s_lm1
if l == 0:
H_t = F.tanh(H_t + rnn_h(s_lm1_H))
T_t = F.sigmoid(T_t + rnn_t(s_lm1_T))
            C_t = F.sigmoid(C_t + rnn_c(s_lm1_C))  # carry gate uses rnn_c, not rnn_t
else:
H_t = F.tanh(rnn_h(s_lm1_H))
T_t = F.sigmoid(rnn_t(s_lm1_T))
            C_t = F.sigmoid(rnn_c(s_lm1_C))
s_l = H_t * T_t + s_lm1 * C_t
s_lm1 = s_l
return s_l
def forward(self, inputs):
current_input = inputs
for i in range(0, len(self.layers), 2):
layer, activation = self.layers[i], self.layers[i+1]
proj, linear = layer(current_input), current_input
proj = F.dropout(proj, p=self.dropout, training=self.training)
nonlinear = activation(proj[:, 0:self.input_dim])
gate = F.sigmoid(proj[:, self.input_dim:(2 * self.input_dim)])
# apply gate
current_input = gate * linear + (1 - gate) * nonlinear
return current_input
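
The gated update above is the usual highway combination, y = gate * x + (1 - gate) * H(x). A minimal standalone sketch follows; highway_step is a hypothetical name and ReLU merely stands in for whatever activation the layer list actually holds.

import torch
import torch.nn.functional as F

def highway_step(x, proj, input_dim):
    # proj holds [transform | gate] pre-activations of width 2 * input_dim
    nonlinear = F.relu(proj[:, :input_dim])
    gate = torch.sigmoid(proj[:, input_dim:2 * input_dim])
    # gate near 1 copies the input through; gate near 0 uses the transform
    return gate * x + (1 - gate) * nonlinear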
# gracefully taken from:
# https://discuss.pytorch.org/t/solved-reverse-gradients-in-backward-pass/3589/4
def forward(self, e, input, mask, scale=0):
hidden = Variable(torch.randn(self.batch_size, self.n,
self.hidden_size)).type(dtype)
if scale == 0:
e = Variable(torch.zeros(self.batch_size, self.n)).type(dtype)
Phi = self.build_Phi(e, mask)
N = torch.sum(Phi, 2).squeeze()
N += (N == 0).type(dtype) # avoid division by zero
Nh = N.unsqueeze(2).expand(self.batch_size, self.n,
self.hidden_size + self.input_size)
# Normalize inputs, important part!
mask_inp = mask.unsqueeze(2).expand_as(input)
input_n = self.Normalize_inputs(Phi, input) * mask_inp
# input_n = input * mask_inp
for i, layer in enumerate(self.layers):
hidden = layer(input_n, hidden, Phi, Nh)
hidden_p = hidden.view(self.batch_size * self.n, self.hidden_size)
scores = self.linear_b(hidden_p)
probs = torch.sigmoid(scores).view(self.batch_size, self.n) * mask
# probs has shape (batch_size, n)
return scores, probs, input_n, Phi
def g(self, tilde_z_l, u_l):
if self.use_cuda:
ones = Parameter(torch.ones(tilde_z_l.size()[0], 1).cuda())
else:
ones = Parameter(torch.ones(tilde_z_l.size()[0], 1))
b_a1 = ones.mm(self.a1)
b_a2 = ones.mm(self.a2)
b_a3 = ones.mm(self.a3)
b_a4 = ones.mm(self.a4)
b_a5 = ones.mm(self.a5)
b_a6 = ones.mm(self.a6)
b_a7 = ones.mm(self.a7)
b_a8 = ones.mm(self.a8)
b_a9 = ones.mm(self.a9)
b_a10 = ones.mm(self.a10)
mu_l = torch.mul(b_a1, torch.sigmoid(torch.mul(b_a2, u_l) + b_a3)) + \
torch.mul(b_a4, u_l) + \
b_a5
v_l = torch.mul(b_a6, torch.sigmoid(torch.mul(b_a7, u_l) + b_a8)) + \
torch.mul(b_a9, u_l) + \
b_a10
hat_z_l = torch.mul(tilde_z_l - mu_l, v_l) + mu_l
return hat_z_l
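
The ten a-parameters above implement a per-unit denoising combinator of the Ladder Network form: mu and v are learned sigmoid-plus-linear functions of the top-down signal u, and the noisy activation tilde_z is shifted and rescaled around mu. A compact sketch of the same arithmetic, with the parameters gathered in a list purely for brevity (an assumption, not the module's layout):

import torch

def combinator(tilde_z, u, a):
    # a: ten (1, width) parameters, broadcast over the batch dimension
    mu = a[0] * torch.sigmoid(a[1] * u + a[2]) + a[3] * u + a[4]
    v = a[5] * torch.sigmoid(a[6] * u + a[7]) + a[8] * u + a[9]
    return (tilde_z - mu) * v + mu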
def sigmoid(x):
return 1.0/(math.exp(-x)+1.)
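
The one-liner above can overflow in math.exp when x is a large negative number. A numerically stable variant (assuming import math, which this snippet also needs) only exponentiates non-positive arguments:

import math

def stable_sigmoid(x):
    # hypothetical alternative; evaluates exp only on non-positive arguments
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)
    return z / (1.0 + z)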
def forward(self, image_pairs: Variable) -> Variable:
arc_out = self.arc(image_pairs)
d1 = F.elu(self.dense1(arc_out))
decision = torch.sigmoid(self.dense2(d1))
return decision
def forward(self, input_, hx):
"""
Args:
input_: A (batch, input_size) tensor containing input
features.
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0)
.expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
    f, i, o, g = torch.split(wh_b + wi, self.hidden_size, dim=1)
c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
return h_1, c_1
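
For reference, the same gate arithmetic as a self-contained function; lstm_cell and the weight shapes below (weight_ih of size input_size x 4*hidden, weight_hh of size hidden x 4*hidden, bias of size 4*hidden) are assumptions inferred from the matrix products above, not attributes taken from the class.

import torch

def lstm_cell(x, h_0, c_0, w_ih, w_hh, b):
    gates = x @ w_ih + h_0 @ w_hh + b                    # (batch, 4 * hidden)
    f, i, o, g = torch.split(gates, h_0.size(1), dim=1)  # forget, input, output, candidate
    c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
    h_1 = torch.sigmoid(o) * torch.tanh(c_1)
    return h_1, c_1

# smoke test: batch 2, input 3, hidden 4
h, c = lstm_cell(torch.randn(2, 3), torch.zeros(2, 4), torch.zeros(2, 4),
                 torch.randn(3, 16), torch.randn(4, 16), torch.zeros(16))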
def forward(self, input_, hx, time):
"""
Args:
input_: A (batch, input_size) tensor containing input
features.
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
time: The current timestep value, which is used to
get appropriate running statistics.
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0)
.expand(batch_size, *self.bias.size()))
wh = torch.mm(h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
bn_wh = self.bn_hh(wh, time=time)
bn_wi = self.bn_ih(wi, time=time)
    f, i, o, g = torch.split(bn_wh + bn_wi + bias_batch, self.hidden_size, dim=1)
c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(self.bn_c(c_1, time=time))
return h_1, c_1
def forward(self, input):
return torch.sigmoid(input)
def forward(self, input, target):
return F.binary_cross_entropy(torch.sigmoid(input), target,
self.weight, self.size_average)
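
Applying sigmoid and then binary_cross_entropy can saturate for large-magnitude logits; current PyTorch provides a fused, numerically stable form. The sketch below is an alternative formulation, not the original module:

import torch.nn.functional as F

def bce_from_logits(logits, target, weight=None):
    # equal in value to binary_cross_entropy(torch.sigmoid(logits), target)
    # but computed from the logits directly for numerical stability
    return F.binary_cross_entropy_with_logits(logits, target, weight=weight)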
def forward(self, u, x, bias, init=None, mask_h=None):
bidir = 2 if self.bidirectional else 1
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = self.d_out
k = u.size(-1) // d
k_ = k//2 if self.bidirectional else k
u = u.view(length, batch, d, k_)
cur = x.new(batch, d).zero_() if init is None else init
size = (length, batch, d*bidir) if x.dim() == 3 else (batch, d*bidir)
bias1, bias2 = bias.split(self.d_out)
u_ = [u.select(-1, i) for i in range(0, k_)]
h = []
x_ = x if k_ == 3 else u_[3]
for i in range(0, length):
u0i, u1i, u2i = u_[0][i], u_[1][i], u_[2][i]
g1 = torch.sigmoid(u1i + bias1)
g2 = torch.sigmoid(u2i + bias2)
cur = (cur - u0i)*g1 + u0i
if self.activation_type == 1:
val = torch.tanh(cur)
elif self.activation_type == 2:
val = torch.relu(cur)
if mask_h is not None:
val = val*mask_h
xi = x_[i]
h.append((val - xi)*g2 + xi)
if self.bidirectional:
assert False
else:
last_hidden = cur
h = torch.stack(h)
return h, last_hidden
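
Written out per timestep, the loop above is an elementwise SRU-style recurrence: u1 and u2 gate the running cell state and a highway connection around it. A single-step sketch (sru_step is a hypothetical name; tanh assumes activation_type == 1):

import torch

def sru_step(c_prev, u0, u1, u2, x, bias1, bias2):
    f = torch.sigmoid(u1 + bias1)           # forget gate (g1 above)
    r = torch.sigmoid(u2 + bias2)           # highway gate (g2 above)
    c = f * c_prev + (1 - f) * u0           # same as (c_prev - u0) * g1 + u0
    h = r * torch.tanh(c) + (1 - r) * x     # same as (val - xi) * g2 + xi
    return h, c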
def forward(self, char, word):
    weights = t.nn.functional.softmax(self.weights, dim=1)
    outs = []
    for ii, model in enumerate(self.models):
        if model.opt.type_ == 'char':
            out = t.sigmoid(model(*char))
        else:
            out = t.sigmoid(model(*word))
        out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
        outs.append(out)
    # outs = [t.sigmoid(model(title,content))*weight for model in self.models]
    # outs = [model(title,content)*weight.view(1,1).expand(title.size(0),self.opt.num_classes).mm(self.label_weight) for model,weight in zip(self.models,self.weight)]
    return sum(outs)
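
A stripped-down illustration of the weighted ensemble on dummy data; the (num_classes, num_models) shape of the weight matrix is inferred from the weights[:, ii] indexing above and is otherwise an assumption.

import torch

num_models, num_classes, batch = 3, 5, 2
weights = torch.nn.functional.softmax(torch.randn(num_classes, num_models), dim=1)
probs = [torch.sigmoid(torch.randn(batch, num_classes)) for _ in range(num_models)]
# scale each model's class probabilities by its per-class weight and sum over models
ensemble = sum(p * weights[:, i].view(1, -1) for i, p in enumerate(probs))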
def forward(self, title, content):
    weights = t.nn.functional.softmax(self.weights, dim=1)
    outs = []
    for ii, model in enumerate(self.models):
        out = t.sigmoid(model(title, content))
        out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
        outs.append(out)
    # outs = [t.sigmoid(model(title,content))*weight for model in self.models]
    # outs = [model(title,content)*weight.view(1,1).expand(title.size(0),self.opt.num_classes).mm(self.label_weight) for model,weight in zip(self.models,self.weight)]
    return sum(outs)
def forward(self, char, word):
    weights = t.nn.functional.softmax(self.weights, dim=1)
    outs = []
    for ii, model in enumerate(self.models):
        if model.opt.type_ == 'char':
            out = t.sigmoid(model(*char))
        else:
            out = t.sigmoid(model(*word))
        if self.opt.static:
            out = out.detach()
        out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
        outs.append(out)
    # outs = [t.sigmoid(model(title,content))*weight for model in self.models]
    # outs = [model(title,content)*weight.view(1,1).expand(title.size(0),self.opt.num_classes).mm(self.label_weight) for model,weight in zip(self.models,self.weight)]
    return sum(outs)
def forward(self, char, word):
    weights = t.nn.functional.softmax(self.weights, dim=1)
    outs = []
    for ii, model in enumerate(self.models):
        if model.opt.type_ == 'char':
            out = t.sigmoid(model(*char))
        else:
            out = t.sigmoid(model(*word))
        out = out * (weights[:, ii].contiguous().view(1, -1).expand_as(out))
        outs.append(out)
    return sum(outs)
def __init__(self, data_root, labels_file):
    self.data_files_path = glob(data_root + "*val.pth")
    self.model_num = len(self.data_files_path)
    self.label_file_path = labels_file
    self.data = t.zeros(100, 1999 * self.model_num)
    for i in range(self.model_num):
        self.data[:, i * 1999:i * 1999 + 1999] = t.sigmoid(t.load(self.data_files_path[i]).float()[:100])
    print(self.data.size())
def forward(self, in_out_pairs):
# input is batch_size*2 int Variable
i = self.input_embeddings(in_out_pairs[:, 0])
o = self.output_embeddings(in_out_pairs[:, 1])
# raw activations, NCE_Loss handles the sigmoid (we need to know classes to know the sign to apply)
return (i * o).sum(1).squeeze()
def forward(self, activations, targets):
# targets are -1.0 or 1.0, 1-d Variable
# likelihood assigned by the model to pos and neg samples is given by the sigmoid, with the sign
# determined by the class.
# negative log likelihood
return log(sigmoid(activations * targets)).sum() * -1.0
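
log(sigmoid(z)) underflows to -inf when z * target is strongly negative; torch.nn.functional.logsigmoid computes the same quantity stably, so an equivalent but safer formulation of this loss would be:

import torch.nn.functional as F

def nce_loss(activations, targets):
    # targets are +1.0 for observed pairs and -1.0 for negative samples
    return -F.logsigmoid(activations * targets).sum()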
def forward(self, x, targets=None, num_iter=0):
conv1s = self.conv1s(x)
conv2 = self.conv2(conv1s)
conv3 = self.conv3(conv2)
conv1s_reorg = self.conv_reorg(conv1s)
conv1s_reorg = self.reorg(conv1s_reorg)
cat_1_3 = torch.cat([conv1s_reorg, conv3], 1)
conv4 = self.conv4(cat_1_3)
output = self.conv5(conv4)
batchsize, _, self.H, self.W = output.size()
# output shape: (batchsize, H*W*num_anchor, (num_class+num_loc))
output = output.permute(0, 2, 3, 1).contiguous().view(batchsize, -1, (self.num_class+self.num_loc))
bbox_delta = output[:, :, :4].contiguous()
    iou_pred = torch.sigmoid(output[:, :, 4]).contiguous()
    class_pred = output[:, :, 5:].contiguous()
    prob_pred = F.softmax(class_pred.view(-1, self.num_class), dim=1).view_as(class_pred)
pred = (bbox_delta, iou_pred, prob_pred)
self.anchors_cfg[:, 0::2] = self.anchors_cfg[:, 0::2] / self.W
self.anchors_cfg[:, 1::2] = self.anchors_cfg[:, 1::2] / self.H
if self.phase == 'train':
self._calc_loss(pred, targets, num_iter)
else:
assert batchsize == 1, "now only support batchsize=1"
anchors = self._generate_anchors()
bbox_pred = self._generate_pred_bbox(bbox_delta[0], anchors)
output = self.detect(bbox_pred, iou_pred.view(-1), prob_pred.view(-1, self.num_class))
return output
def predict(self, user_ids, item_ids=None):
"""
Make predictions: given a user id, compute the recommendation
scores for items.
Parameters
----------
user_ids: int or array
If int, will predict the recommendation scores for this
user for all items in item_ids. If an array, will predict
scores for all (user, item) pairs defined by user_ids and
item_ids.
item_ids: array, optional
Array containing the item ids for which prediction scores
are desired. If not supplied, predictions for all items
will be computed.
Returns
-------
predictions: np.array
Predicted scores for all items in item_ids.
"""
self._check_input(user_ids, item_ids, allow_items_none=True)
self._net.train(False)
user_ids, item_ids = _predict_process_ids(user_ids, item_ids,
self._num_items,
self._use_cuda)
out = self._net(user_ids, item_ids)
if self._loss == 'poisson':
out = torch.exp(out)
elif self._loss == 'logistic':
out = torch.sigmoid(out)
return cpu(out.data).numpy().flatten()
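
The loss-dependent link at the end is the only transformation applied at prediction time. A toy illustration on made-up raw scores (the loss names mirror the branches above):

import torch

raw = torch.tensor([-1.0, 0.0, 2.5])     # raw network outputs
poisson = torch.exp(raw)                 # 'poisson' loss: exponential link
logistic = torch.sigmoid(raw)            # 'logistic' loss: probabilities in (0, 1)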