def __init__(self, img_feat, tags_idx, a_tags_idx, test_tags_idx, z_dim, vocab_processor):
    self.z_sampler = stats.truncnorm((-1 - 0.) / 1., (1 - 0.) / 1., loc=0., scale=1.)  # standard normal truncated to [-1, 1]
self.length = len(tags_idx)
self.current = 0
self.img_feat = img_feat
self.tags_idx = tags_idx
self.a_tags_idx = a_tags_idx
self.w_idx = np.arange(self.length)
self.w_idx2 = np.arange(self.length)
self.tmp = 0
self.epoch = 0
self.vocab_processor = vocab_processor
self.vocab_size = len(vocab_processor._reverse_mapping)
self.unk_id = vocab_processor._mapping['<UNK>']
self.eos_id = vocab_processor._mapping['<EOS>']
self.hair_id = vocab_processor._mapping['hair']
self.eyes_id = vocab_processor._mapping['eyes']
self.gen_info()
self.test_tags_idx = self.gen_test_hot(test_tags_idx)
self.fixed_z = self.next_noise_batch(len(self.test_tags_idx), z_dim)
idx = np.random.permutation(np.arange(self.length))
self.w_idx2 = self.w_idx2[idx]
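
# The loader above stores `self.z_sampler` but draws from it elsewhere. A minimal
# sketch of `next_noise_batch` (called to build `fixed_z` above), assuming it simply
# samples a (size, z_dim) noise matrix from the truncated normal; this body is
# illustrative, not the original implementation:
def next_noise_batch(self, size, z_dim):
    # rvs accepts a shape; cast to float32 for typical GAN inputs (assumption).
    return self.z_sampler.rvs([size, z_dim]).astype(np.float32)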
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.group1 = nn.Sequential(
OrderedDict([
('fc', nn.Linear(2048, num_classes))
])
)
    # Initialize conv/linear weights from a truncated normal distribution
    # (zero mean, clipped at +/- 2 standard deviations).
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel())).view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
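
# The weight-init loop above recurs in every Inception3 variant in this file. A
# reusable helper with the same convention (zero mean, truncation at +/- 2 standard
# deviations); a sketch, not part of torchvision:
import scipy.stats as stats
import torch

def truncated_normal_(tensor, stddev=0.1):
    # Fill `tensor` in place with draws from N(0, stddev^2) truncated to
    # [-2*stddev, 2*stddev].
    values = stats.truncnorm(-2, 2, scale=stddev).rvs(tensor.numel())
    with torch.no_grad():
        tensor.copy_(torch.as_tensor(values, dtype=tensor.dtype).view(tensor.size()))
    return tensor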
def test_truncnorm_prior(self):
"""check truncnorm RV"""
msg = "truncnorm prior: incorrect test {0}"
test_vals = [-0.1, 0.0, 0.5, 1.0, 1.1]
mean, std, low, up = 0.9, 0.1, 0.0, 1.0
a, b = (low - mean) / std, (up - mean) / std
correct = truncnorm(loc=mean, scale=std, a=a, b=b)
for ii, xx in enumerate(test_vals):
assert self.gPrior.pdf(xx) == correct.pdf(xx), msg.format(ii)
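
# scipy's truncnorm takes its bounds in standardized units, which is why the test
# converts (low, up) through (x - mean) / std. A quick sanity check of that
# convention (illustrative, separate from the test above):
import numpy as np
from scipy.stats import truncnorm
mean, std, low, up = 0.9, 0.1, 0.0, 1.0
rv = truncnorm((low - mean) / std, (up - mean) / std, loc=mean, scale=std)
assert np.allclose(rv.support(), (low, up))  # support is back in data units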
def randomGammaCorrection(image):
    # Draw a random gamma from N(mu=1, sigma=0.5) truncated to [0.5, 1.5],
    # then apply it to an 8-bit image.
    lower, upper = 0.5, 1.5
    mu, sigma = 1.0, 0.5
    alpha = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
    gamma = alpha.rvs(1)[0]
    image = (pow(image / 255.0, gamma) * 255).astype(np.uint8)
    return image
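
# Example usage with a synthetic 8-bit image (the function assumes a numpy uint8
# array, given the /255.0 normalization and the uint8 cast):
import numpy as np
img = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
out = randomGammaCorrection(img)
assert out.dtype == np.uint8 and out.shape == img.shape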
# def rle(img):
# '''
# img: numpy array, 1 - mask, 0 - background
# Returns run length as a formatted string
# '''
# bytes = np.where(img.flatten() == 1)[0]
# runs = []
# prev = -2
# for b in bytes:
# if (b > prev + 1): runs.extend((b + 1, 0))
# runs[-1] += 1
# prev = b
#
# return ' '.join([str(i) for i in runs])
# https://www.kaggle.com/stainsby/fast-tested-rle
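
# A vectorized encoder producing the same format as the commented-out version above
# (1-indexed run starts followed by run lengths), in the spirit of the linked kernel;
# a sketch, not copied from it:
import numpy as np

def rle_encode(img):
    # img: numpy array, 1 - mask, 0 - background.
    pixels = np.concatenate([[0], img.flatten(), [0]])  # pad so every run closes
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1   # 1-indexed change points
    runs[1::2] -= runs[::2]                             # turn run ends into lengths
    return ' '.join(str(x) for x in runs)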
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.fc = nn.Linear(2048, num_classes)
    # Initialize conv/linear weights from a truncated normal distribution
    # (zero mean, clipped at +/- 2 standard deviations).
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel())).view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.maxPool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.maxPool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.avgpool = nn.AvgPool2d(kernel_size=8)
self.dropout = nn.Dropout()
self.fc = nn.Linear(2048, num_classes)
    # Initialize conv/linear weights from a truncated normal distribution
    # (zero mean, clipped at +/- 2 standard deviations).
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel())).view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
def GaussianDistribution(mean, std, a=None, b=None):
    if std == 0.0:
        return Distribution.ConstantDistribution(mean)
    if a is None and b is None:
        # Unbounded Gaussian.
        return N(mean, std)
    # Truncated Gaussian: scipy expects the truncation bounds in standardized units.
    dist = UncertainVariable(ss.truncnorm(
        a=-np.inf if a is None else (a - mean) / std,
        b=np.inf if b is None else (b - mean) / std,
        loc=mean, scale=std))
    logging.debug('Distribution -- truncated Gaussian: {}, {} [{}, {}]'.format(
        dist.mean, np.sqrt(dist.var), a, b))
    return dist
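
# Example: a Gaussian with mean 1.0 and std 0.5, truncated below at 0; the upper
# bound stays unbounded since b is None. (Distribution, N and UncertainVariable are
# defined elsewhere in this project.)
d = GaussianDistribution(1.0, 0.5, a=0.0)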
def __init__(self, num_classes=None, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
self.num_classes = num_classes
if self.num_classes is not None:
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.fc = nn.Linear(2048, num_classes)
        # Initialize conv/linear weights from a truncated normal distribution
        # (zero mean, clipped at +/- 2 standard deviations).
        import scipy.stats as stats
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.numel())).view(m.weight.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def get_model(n_obs=50, true_params=None, seed_obs=None, stochastic=True):
"""Return a complete Ricker model in inference task.
This is a simplified example that achieves reasonable predictions. For more extensive treatment
and description using 13 summary statistics, see:
Wood, S. N. (2010) Statistical inference for noisy nonlinear ecological dynamic systems,
Nature 466, 1102–1107.
Parameters
----------
n_obs : int, optional
Number of observations.
true_params : list, optional
Parameters with which the observed data is generated.
seed_obs : int, optional
Seed for the observed data generation.
stochastic : bool, optional
Whether to use the stochastic or deterministic Ricker model.
Returns
-------
m : elfi.ElfiModel
"""
if stochastic:
simulator = partial(stochastic_ricker, n_obs=n_obs)
if true_params is None:
true_params = [3.8, 0.3, 10.]
else:
simulator = partial(ricker, n_obs=n_obs)
if true_params is None:
true_params = [3.8]
m = elfi.ElfiModel()
y_obs = simulator(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed_obs))
sim_fn = partial(simulator, n_obs=n_obs)
sumstats = []
if stochastic:
elfi.Prior(ss.expon, np.e, 2, model=m, name='t1')
        elfi.Prior(ss.truncnorm, 0, 5, model=m, name='t2')  # support [0, 5] with the default loc=0, scale=1
elfi.Prior(ss.uniform, 0, 100, model=m, name='t3')
elfi.Simulator(sim_fn, m['t1'], m['t2'], m['t3'], observed=y_obs, name='Ricker')
sumstats.append(elfi.Summary(partial(np.mean, axis=1), m['Ricker'], name='Mean'))
sumstats.append(elfi.Summary(partial(np.var, axis=1), m['Ricker'], name='Var'))
sumstats.append(elfi.Summary(num_zeros, m['Ricker'], name='#0'))
elfi.Discrepancy(chi_squared, *sumstats, name='d')
else: # very simple deterministic case
elfi.Prior(ss.expon, np.e, model=m, name='t1')
elfi.Simulator(sim_fn, m['t1'], observed=y_obs, name='Ricker')
sumstats.append(elfi.Summary(partial(np.mean, axis=1), m['Ricker'], name='Mean'))
elfi.Distance('euclidean', *sumstats, name='d')
return m
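
# Usage sketch: build the stochastic model and run rejection ABC on the discrepancy
# node 'd'. elfi.Rejection is the standard ELFI interface; the batch_size and
# quantile below are illustrative choices, not values from the original example.
m = get_model(n_obs=50, seed_obs=0)
rej = elfi.Rejection(m['d'], batch_size=1000)
result = rej.sample(500, quantile=0.01)
print(result)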