Python torch.bernoulli() usage examples (source code)
def setup_reparam_mask(self, n):
    # rejection-sample a Bernoulli(0.3) mask that is non-empty
    # but covers fewer than 40% of the n positions
    while True:
        mask = torch.bernoulli(0.30 * torch.ones(n))
        if torch.sum(mask) < 0.40 * n and torch.sum(mask) > 0.5:
            return mask
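A quick usage check (illustrative only; `self` is unused in the snippet above, so any placeholder works):

mask = setup_reparam_mask(None, 100)     # mean density ~0.3
assert 0 < float(torch.sum(mask)) < 40   # non-empty, under 40% of 100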
# for doing model sampling in different sequential orders
def sample(self):
    """
    Ref: :py:meth:`pyro.distributions.distribution.Distribution.sample`.
    """
    return Variable(torch.bernoulli(self.ps.data))
def forward(self, input):
    """
    input should be [seq_len][batch_size]
    """
    seq_len = input.size()[0]
    batch_size = input.size()[1]
    # reuse initial_state and initial_cell if they haven't changed
    # since last time
    if self.initial_state is None or self.initial_state.size()[1] != batch_size:
        self.initial_state = autograd.Variable(torch.zeros(
            self.num_layers * 2,
            batch_size,
            self.num_hidden
        ))
        self.initial_cell = autograd.Variable(torch.zeros(
            self.num_layers * 2,
            batch_size,
            self.num_hidden
        ))
        if input.is_cuda:
            self.initial_state = self.initial_state.cuda()
            self.initial_cell = self.initial_cell.cuda()
    x = self.embedding(input)
    x, _ = self.lstm(x, (self.initial_state, self.initial_cell))
    x = self.linear(x)
    x = F.sigmoid(x)
    # sample a hard binary selection for every token position
    rationale_selected_node = torch.bernoulli(x)
    rationale_selected = rationale_selected_node.view(seq_len, batch_size)
    rationale_lengths = rationale_selected.sum(dim=0).int()
    max_rationale_length = rationale_lengths.max()
    # if self.rationales is None or self.rationales.shape[1] != batch_size:
    rationales = torch.LongTensor(max_rationale_length.data[0], batch_size)
    if input.is_cuda:
        rationales = rationales.cuda()
    rationales.fill_(self.pad_id)
    # pack each example's selected tokens to the front, pad the rest
    for n in range(batch_size):
        this_len = rationale_lengths[n].data[0]
        rationales[:this_len, n] = torch.masked_select(
            input[:, n].data, rationale_selected[:, n].data.byte()
        )
    return rationale_selected_node, rationale_selected, rationales, rationale_lengths
def sample_mask(self):
    # V, th, T are presumably module-level aliases for Variable, torch, Tensor
    keep = 1.0 - self.dropout
    self.mask = V(th.bernoulli(T(1, self.hidden_size).fill_(keep)))
def draw(self, N):
    '''
    Draw N samples from the multinomial using the alias method.
    '''
    K = self.alias.size(0)
    kk = torch.LongTensor(np.random.randint(0, K, size=N))
    prob = self.prob.index_select(0, kk)
    alias = self.alias.index_select(0, kk)
    # b == 1: keep the sampled bin kk; b == 0: take its alias instead
    b = torch.bernoulli(prob)
    oq = kk.mul(b.long())
    oj = alias.mul((1 - b).long())
    return oq + oj
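`draw` assumes `self.prob` and `self.alias` were built beforehand. As a hedged sketch, the standard alias-table construction (Vose's variant of Walker's method) looks roughly like this; the function name is illustrative, not from the original project:

import torch

def build_alias_tables(probs):
    # probs: 1-D tensor of K probabilities summing to 1
    K = probs.size(0)
    prob = torch.zeros(K)
    alias = torch.LongTensor(K).zero_()
    scaled = [float(p) * K for p in probs]
    small = [i for i, p in enumerate(scaled) if p < 1.0]
    large = [i for i, p in enumerate(scaled) if p >= 1.0]
    while small and large:
        s, l = small.pop(), large.pop()
        prob[s] = scaled[s]            # acceptance probability for bin s
        alias[s] = l                   # overflow goes to bin l
        scaled[l] -= 1.0 - scaled[s]
        (small if scaled[l] < 1.0 else large).append(l)
    for i in small + large:            # leftovers accept with probability 1
        prob[i] = 1.0
    return prob, alias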
def setUp(self):
    self.x = torch.rand(10, 14, 2)
    self.t = torch.rand(10, 14, 2)
    self.v = torch.bernoulli(torch.rand(10, 14, 1)).expand(10, 14, 2).clone()
def schedule_sampling(self, prev, dec_out):
    """
    Resample inputs to the next iteration from the model itself. The number
    of resampled positions is determined by independent Bernoulli draws for
    each example in the batch, with keep probability equal to the model's
    exposure_rate.
    Parameters:
    -----------
    - prev: torch.LongTensor(batch_size)
    - dec_out: torch.Tensor(batch_size x hid_dim)
    Returns: partially resampled input
    --------
    - prev: torch.LongTensor(batch_size)
    """
    prev, dec_out = prev.data, dec_out.data  # don't register computation
    keep_mask = torch.bernoulli(
        torch.zeros_like(prev).float() + self.exposure_rate) == 1
    # return if no sampling is necessary
    if len(keep_mask.nonzero()) == len(prev):
        return prev
    sampled = self.decoder.project(
        Variable(dec_out, volatile=True)).max(1)[1].data
    if keep_mask.nonzero().dim() == 0:  # nothing kept: return all sampled
        return sampled
    keep_mask = keep_mask.nonzero().squeeze(1)
    sampled[keep_mask] = prev[keep_mask]
    return sampled
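A toy illustration of the keep/replace rule above, with illustrative values (an exposure rate of 0.75 keeps roughly three quarters of the gold tokens):

import torch

prev = torch.LongTensor([3, 7, 1, 4])        # gold tokens
sampled = torch.LongTensor([3, 2, 1, 9])     # model argmax tokens
keep = torch.bernoulli(torch.zeros(4) + 0.75).long()
mixed = keep * prev + (1 - keep) * sampled   # gold where keep == 1, else sampled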
def word_dropout_mask(X, dropout_rate, reserved_codes=()):
    """
    Computes a binary dropout mask over batch examples from a Bernoulli
    distribution with mean equal to dropout_rate.
    """
    probs = torch.zeros_like(X).float() + dropout_rate
    # zero out reserved_codes (avoid dropping reserved symbols)
    if len(reserved_codes) > 0:
        probs[sum((X == x) for x in reserved_codes)] = 0
    # return binary mask
    return torch.bernoulli(probs).byte()
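A hedged usage sketch: dropping ordinary tokens with probability 0.2 while protecting pad and eos ids (the ids and the unk id below are illustrative, not from the original project):

import torch

X = torch.LongTensor([[5, 2, 9, 0], [7, 0, 3, 1]])  # 0 = pad, 1 = eos (illustrative)
mask = word_dropout_mask(X, dropout_rate=0.2, reserved_codes=(0, 1))
X_dropped = X.clone()
X_dropped[mask] = 2                                  # hypothetical <unk> id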
def sample(self, sample_shape=torch.Size()):
    shape = self._extended_shape(sample_shape)
    return torch.bernoulli(self.probs.expand(shape))
def forward(self, X):
    X = super(Dropout, self).forward(X)
    # fresh Bernoulli mask with success probability self.p (no rescaling)
    eps = torch.Tensor(*X.size())
    eps.fill_(self.p)
    eps = Variable(torch.bernoulli(eps))
    return X * eps
utils.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def v_to_h(v, W, h_bias):
    # hidden activation probabilities, then a Bernoulli sample
    p_h = torch.sigmoid(F.linear(v, W, h_bias))
    h = torch.bernoulli(p_h)
    return p_h, h
utils.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def h_to_v(h, W, v_bias):
    # visible activation probabilities, then a Bernoulli sample
    p_v = torch.sigmoid(F.linear(h, W.t(), v_bias))
    v = torch.bernoulli(p_v)
    return p_v, v
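With `v_to_h` and `h_to_v` in hand, a contrastive-divergence-style alternating Gibbs chain is just repeated round trips. A minimal sketch under assumed shapes (W is hidden x visible, as the `F.linear` calls above imply) and a recent PyTorch where `F.linear` accepts plain tensors:

import torch
import torch.nn.functional as F

n_visible, n_hidden = 6, 4
W = torch.randn(n_hidden, n_visible) * 0.01
v_bias = torch.zeros(n_visible)
h_bias = torch.zeros(n_hidden)

v = torch.bernoulli(torch.rand(2, n_visible))  # batch of 2 visible vectors
for _ in range(10):                            # 10 alternating Gibbs steps
    p_h, h = v_to_h(v, W, h_bias)
    p_v, v = h_to_v(h, W, v_bias)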
utils.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def generate(dbn, iteration=1, prop_input=None, annealed=False, n=0):
    if prop_input is not None:
        # estimate the top hidden activation level by propagating
        # real data up through the lower layers (mean-field)
        prop_v = Variable(torch.from_numpy(prop_input).type(torch.FloatTensor))
        for i in range(dbn.n_layers - 1):
            prop_v = dbn.rbm_layers[i].v_to_h(prop_v)[0]
        prop = prop_v.data.mean()
    else:
        prop = 0.5
    # initialise the top hidden layer with Bernoulli(prop) noise
    h = torch.bernoulli((dbn.rbm_layers[-1].h_bias * 0 + prop).view(1, -1).repeat(n, 1))
    p_v, v = dbn.rbm_layers[-1].h_to_v(h)
    if not annealed:
        for _ in range(iteration):
            p_h, h = dbn.rbm_layers[-1].v_to_h(v)
            p_v, v = dbn.rbm_layers[-1].h_to_v(h)
    else:
        # annealed sampling: temporarily rescale the top RBM's parameters
        # by 1/temp, run the chain, then restore them
        for temp in np.linspace(3, 0.6, 25):
            for i in dbn.rbm_layers[-1].parameters():
                i.data *= 1.0 / temp
            for _ in range(iteration):
                p_h, h = dbn.rbm_layers[-1].v_to_h(v)
                p_v, v = dbn.rbm_layers[-1].h_to_v(h)
            for i in dbn.rbm_layers[-1].parameters():
                i.data *= temp
    # propagate the sample back down to the visible layer
    for i in range(dbn.n_layers - 1):
        p_v, v = dbn.rbm_layers[-2 - i].h_to_v(v)
    return v
model_DBN.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def sample_from_p(self, p):
    return torch.bernoulli(p)
utils.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def generate(dbm, iteration=1, n=1):
    even_layer = []
    odd_layer = []
    # initialise all odd layers with Bernoulli(0.5) noise
    for i in range(0, dbm.n_odd_layers):
        odd_layer.append(torch.bernoulli((dbm.bias[2 * i + 1] * 0 + 0.5).view(1, -1).repeat(n, 1)))
    # alternating block Gibbs updates between even and odd layers
    for _ in range(iteration):
        p_even_layer, even_layer = dbm.odd_to_even(odd_layer)
        p_odd_layer, odd_layer = dbm.even_to_odd(even_layer)
    return even_layer[0]
model_DBM.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def odd_to_even(self, odd_input=None):
    even_p_output = []
    even_output = []
    for i in range(self.n_even_layers):
        if i == 0:
            even_p_output.append(torch.sigmoid(F.linear(odd_input[i], self.W[2 * i].t(), self.bias[2 * i])))
        elif (self.n_even_layers > self.n_odd_layers) and i == self.n_even_layers - 1:
            even_p_output.append(torch.sigmoid(F.linear(odd_input[i - 1], self.W[2 * i - 1], self.bias[2 * i])))
        else:
            # interior even layers receive input from the odd layers
            # both below and above
            even_p_output.append(torch.sigmoid(F.linear(odd_input[i - 1], self.W[2 * i - 1], self.bias[2 * i]) + F.linear(odd_input[i], self.W[2 * i].t())))
    for i in even_p_output:
        even_output.append(torch.bernoulli(i))
    return even_p_output, even_output
model_DBM.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def even_to_odd(self, even_input=None):
    odd_p_output = []
    odd_output = []
    for i in range(self.n_odd_layers):
        if (self.n_even_layers == self.n_odd_layers) and i == self.n_odd_layers - 1:
            odd_p_output.append(torch.sigmoid(F.linear(even_input[i], self.W[2 * i], self.bias[2 * i + 1])))
        else:
            # interior odd layers receive input from the even layers
            # both below and above
            odd_p_output.append(torch.sigmoid(F.linear(even_input[i], self.W[2 * i], self.bias[2 * i + 1]) + F.linear(even_input[i + 1], self.W[2 * i + 1].t())))
    for i in odd_p_output:
        odd_output.append(torch.bernoulli(i))
    return odd_p_output, odd_output
model_DBM.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def forward(self, v_input, k_positive=10, k_negative=10, greedy=True, ith_layer=0, CD_k=10):  # for greedy training
    if greedy:
        # greedy layer-wise training: propagate up to ith_layer, then run CD
        v = v_input
        for ith in range(ith_layer):
            p_v, v = self.rbm_layers[ith].v_to_h(v)
        v, v_ = self.rbm_layers[ith_layer](v, CD_k=CD_k)
        return v, v_
    # joint training: clamped positive phase, free-running negative phase
    v = v_input
    even_layer = [v]
    odd_layer = []
    for i in range(1, self.n_even_layers):
        even_layer.append(torch.bernoulli(torch.sigmoid(self.bias[2 * i].repeat(v.size()[0], 1))))
    for _ in range(k_positive):
        p_odd_layer, odd_layer = self.even_to_odd(even_layer)
        p_even_layer, even_layer = self.odd_to_even(odd_layer)
        even_layer[0] = v  # clamp the visible layer to the data
    positive_phase_even = [i.detach().clone() for i in even_layer]
    positive_phase_odd = [i.detach().clone() for i in odd_layer]
    # interleave even and odd layers back into bottom-to-top order
    for i, d in enumerate(positive_phase_odd):
        positive_phase_even.insert(2 * i + 1, d)
    positive_phase = positive_phase_even
    for _ in range(k_negative):
        p_odd_layer, odd_layer = self.even_to_odd(even_layer)
        p_even_layer, even_layer = self.odd_to_even(odd_layer)
    negative_phase_even = [i.detach().clone() for i in even_layer]
    negative_phase_odd = [i.detach().clone() for i in odd_layer]
    for i, d in enumerate(negative_phase_odd):
        negative_phase_even.insert(2 * i + 1, d)
    negative_phase = negative_phase_even
    return positive_phase, negative_phase
utils.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def generate(rbm, iteration=1, p=0.5, n=1):
    # start from Bernoulli(p) noise and run the Gibbs chain
    v = torch.bernoulli((rbm.v_bias * 0 + p).view(1, -1).repeat(n, 1))
    for _ in range(iteration):
        p_h, h = rbm.v_to_h(v)
        p_v, v = rbm.h_to_v(h)
    return v
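A hedged usage example (assuming a trained `rbm` exposing the `v_to_h`/`h_to_v` interface above); longer chains give samples closer to the model's equilibrium distribution:

samples = generate(rbm, iteration=1000, p=0.5, n=16)  # 16 samples, 1000 Gibbs steps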
model_RBM.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def sample_from_p(self, p):
    return torch.bernoulli(p)
def train_vae(epoch, args, train_loader, model, optimizer):
    # set losses to 0
    train_loss = 0
    train_re = 0
    train_kl = 0
    # set model in training mode
    model.train()
    # KL warm-up: anneal beta linearly from 0 to 1 over args.warmup epochs
    if args.warmup == 0:
        beta = 1.
    else:
        beta = 1. * (epoch - 1) / args.warmup
        if beta > 1.:
            beta = 1.
    print('beta: {}'.format(beta))
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        # dynamic binarization: resample binary inputs every epoch
        if args.dynamic_binarization:
            x = torch.bernoulli(data)
        else:
            x = data
        # reset gradients
        optimizer.zero_grad()
        # forward pass
        x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = model.forward(x)
        # reconstruction error
        RE = log_Bernoulli(data, x_mean, average=False)
        # KL divergence
        log_p_z = log_Normal_standard(z_q, dim=1)
        log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
        KL = beta * (-torch.sum(log_p_z - log_q_z))
        loss = (-RE + KL) / data.size(0)
        # backward pass
        loss.backward()
        # optimization
        optimizer.step()
        train_loss += loss.data[0]
        train_re += (-RE / data.size(0)).data[0]
        train_kl += (KL / data.size(0)).data[0]
    # calculate final losses (each term already averages over batch size)
    train_loss /= len(train_loader)
    train_re /= len(train_loader)
    train_kl /= len(train_loader)
    return model, train_loss, train_re, train_kl
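The warm-up block above anneals beta linearly: with args.warmup = 100, beta is 0 at epoch 1, 0.49 at epoch 50, and clips to 1 from epoch 101 onward. The same schedule as an equivalent one-line sketch:

beta = min(1.0, (epoch - 1) / float(args.warmup)) if args.warmup > 0 else 1.0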
# ======================================================================================================================
def train_vae_VPflow(epoch, args, train_loader, model, optimizer):
    # set losses to 0
    train_loss = 0
    train_re = 0
    train_kl = 0
    # set model in training mode
    model.train()
    # KL warm-up: anneal beta linearly from 0 to 1 over args.warmup epochs
    if args.warmup == 0:
        beta = 1.
    else:
        beta = 1. * (epoch - 1) / args.warmup
        if beta > 1.:
            beta = 1.
    print('beta: {}'.format(beta))
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        # dynamic binarization: resample binary inputs every epoch
        if args.dynamic_binarization:
            x = torch.bernoulli(data)
        else:
            x = data
        # reset gradients
        optimizer.zero_grad()
        # forward pass
        x_mean, x_logvar, z_0, z_T, z_q_mean, z_q_logvar = model.forward(x)
        # reconstruction error
        RE = log_Bernoulli(data, x_mean, average=False)
        # KL divergence (prior on the flowed z_T, posterior on z_0)
        log_p_z = log_Normal_standard(z_T, dim=1)
        log_q_z = log_Normal_diag(z_0, z_q_mean, z_q_logvar, dim=1)
        KL = beta * (-torch.sum(log_p_z - log_q_z))
        loss = (-RE + KL) / data.size(0)
        # backward pass
        loss.backward()
        # optimization
        optimizer.step()
        train_loss += loss.data[0]
        train_re += (-RE / data.size(0)).data[0]
        train_kl += (KL / data.size(0)).data[0]
    # calculate final losses (each term already averages over batch size)
    train_loss /= len(train_loader)
    train_re /= len(train_loader)
    train_kl /= len(train_loader)
    return model, train_loss, train_re, train_kl
def form_torch_audio_dataset(SPCSabs, SPCSphase, lens, arguments, loadertype):
    SPCSabs = torch.from_numpy(np.array(SPCSabs))
    if loadertype == 'mixture':
        SPCSphase = torch.from_numpy(np.array(SPCSphase))
        dataset = TensorDataset(data_tensor=SPCSabs,
                                target_tensor=SPCSphase,
                                lens=lens)
    elif loadertype == 'source':
        if arguments.input_type == 'noise':
            if arguments.noise_type == 'gamma':
                a, b = 1, 10
                b = 1 / float(b)
                sz = (SPCSabs.size(0), SPCSabs.size(1), arguments.L1)
                inp_np = np.random.gamma(a, b, sz)
                plt.matshow(inp_np.squeeze().transpose()[:, :50])
                inp = torch.from_numpy(inp_np).float()
            elif arguments.noise_type == 'bernoulli':
                sz = (SPCSabs.size(0), SPCSabs.size(1), arguments.L1)
                mat = (1 / float(8)) * torch.ones(sz)
                inp = torch.bernoulli(mat)
            elif arguments.noise_type == 'gaussian':
                inp = torch.randn(SPCSabs.size(0), SPCSabs.size(1), arguments.L1)
            else:
                raise ValueError('Whaaaat?')
        elif arguments.input_type == 'autoenc':
            inp = SPCSabs
            arguments.L1 = arguments.L2
        else:
            raise ValueError('Whaaaaaat input_type?')
        dataset = TensorDataset(data_tensor=inp,
                                target_tensor=SPCSabs,
                                lens=lens)
    else:
        raise ValueError('Whaaaat?')
    kwargs = {'num_workers': 1, 'pin_memory': True} if arguments.cuda else {}
    loader = data_utils.DataLoader(dataset, batch_size=arguments.batch_size,
                                   shuffle=True, **kwargs)
    return loader