Python torch.std() Examples
The following are 16 code examples of torch.std(), collected from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the torch module, or try the search function.
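torch.std() computes the sample standard deviation (with Bessel's correction by default); dim selects the dimension to reduce, and keepdim=True keeps it so the result broadcasts cleanly against the input. A minimal sketch of those options:

import torch

x = torch.randn(4, 3)

print(torch.std(x))         # scalar: std over all elements
print(torch.std(x, dim=0))  # shape (3,): per-column std

# keepdim=True gives shape (4, 1), so the result broadcasts against x
mu = torch.mean(x, dim=-1, keepdim=True)
sigma = torch.std(x, dim=-1, keepdim=True)
print((x - mu) / sigma)     # each row now has ~zero mean and unit std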
Example #1
Source File: model.py From torch-light with MIT License

def __init__(self, n_head, d_model, dropout=0.5):
    super().__init__()

    self.n_head = n_head
    self.d_v = self.d_k = d_k = d_model // n_head

    for name in ["w_qs", "w_ks", "w_vs"]:
        self.__setattr__(
            name, nn.Parameter(torch.FloatTensor(n_head, d_model, d_k)))

    self.attention = ScaledDotProductAttention(d_k, dropout)
    self.lm = LayerNorm(d_model)
    self.w_o = nn.Linear(d_model, d_model, bias=False)
    self.dropout = nn.Dropout(dropout)

    self.w_qs.data.normal_(std=const.INIT_RANGE)
    self.w_ks.data.normal_(std=const.INIT_RANGE)
    self.w_vs.data.normal_(std=const.INIT_RANGE)
Example #2
Source File: NP.py From nispat with GNU General Public License v3.0

def forward(self, x_context, y_context, x_all=None, y_all=None, n=10):
    y_sigma = None
    z_context = self.xy_to_z_params(x_context, y_context)
    if self.training:
        z_all = self.xy_to_z_params(x_all, y_all)
        z_sample = self.reparameterise(z_all)
        y_hat = self.decoder.forward(z_sample, x_all)
    else:
        z_all = z_context
        if self.type == 'ST':
            temp = torch.zeros([n, y_context.shape[0], y_context.shape[2]],
                               device='cpu')
        elif self.type == 'MT':
            temp = torch.zeros([n, y_context.shape[0], 1, y_context.shape[2],
                                y_context.shape[3], y_context.shape[4]],
                               device='cpu')
        for i in range(n):
            z_sample = self.reparameterise(z_all)
            temp[i, :] = self.decoder.forward(z_sample, x_context)
        y_hat = torch.mean(temp, dim=0).to(self.device)
        if n > 1:
            y_sigma = torch.std(temp, dim=0).to(self.device)
    return y_hat, z_all, z_context, y_sigma
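This forward pass uses torch.std() for Monte Carlo uncertainty estimation: the decoder is run n times on samples of z, the mean over the sample dimension is the prediction, and the std over that dimension is the predictive uncertainty. A stripped-down sketch of the pattern (model stands for any stochastic callable and is our placeholder, not part of nispat):

import torch

def mc_predict(model, x, n=10):
    # stack n stochastic forward passes along a new leading sample dimension
    samples = torch.stack([model(x) for _ in range(n)], dim=0)
    y_hat = samples.mean(dim=0)                      # prediction
    y_sigma = samples.std(dim=0) if n > 1 else None  # uncertainty
    return y_hat, y_sigma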
Example #3
Source File: NPR.py From nispat with GNU General Public License v3.0

def forward(self, x_context, y_context, x_all=None, y_all=None, n=10):
    y_sigma = None
    y_sigma_84 = None
    z_context = self.xy_to_z_params(x_context, y_context)
    if self.training:
        z_all = self.xy_to_z_params(x_all, y_all)
        z_sample = self.reparameterise(z_all)
        y_hat, y_hat_84 = self.decoder.forward(z_sample)
    else:
        z_all = z_context
        temp = torch.zeros([n, y_context.shape[0], y_context.shape[2]],
                           device=self.device)
        temp_84 = torch.zeros([n, y_context.shape[0], y_context.shape[2]],
                              device=self.device)
        for i in range(n):
            z_sample = self.reparameterise(z_all)
            temp[i, :], temp_84[i, :] = self.decoder.forward(z_sample)
        y_hat = torch.mean(temp, dim=0).to(self.device)
        y_hat_84 = torch.mean(temp_84, dim=0).to(self.device)
        if n > 1:
            y_sigma = torch.std(temp, dim=0).to(self.device)
            y_sigma_84 = torch.std(temp_84, dim=0).to(self.device)
    return y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84
Example #4
Source File: field.py From parser with MIT License

def build(self, corpus, min_freq=1, embed=None):
    sequences = getattr(corpus, self.name)
    counter = Counter(token
                      for sequence in sequences
                      for token in self.transform(sequence))
    self.vocab = Vocab(counter, min_freq, self.specials)

    if not embed:
        self.embed = None
    else:
        tokens = self.transform(embed.tokens)
        # if the `unk` token already exists in the pretrained vocab,
        # replace it with the self-defined one
        if embed.unk:
            tokens[embed.unk_index] = self.unk

        self.vocab.extend(tokens)
        self.embed = torch.zeros(len(self.vocab), embed.dim)
        self.embed[self.vocab.token2id(tokens)] = embed.vectors
        self.embed /= torch.std(self.embed)
Example #5
Source File: model.py From torch-light with MIT License

def __init__(self, n_head, d_model, dropout):
    super().__init__()

    self.n_head = n_head
    self.d_v = self.d_k = d_k = d_model // n_head

    for name in ["w_qs", "w_ks", "w_vs"]:
        self.__setattr__(
            name, nn.Parameter(torch.FloatTensor(n_head, d_model, d_k)))

    self.attention = ScaledDotProductAttention(d_k, dropout)
    self.lm = LayerNorm(d_model)
    self.w_o = nn.Linear(d_model, d_model, bias=False)
    self.dropout = nn.Dropout(dropout)

    self.w_qs.data.normal_(std=const.INIT_RANGE)
    self.w_ks.data.normal_(std=const.INIT_RANGE)
    self.w_vs.data.normal_(std=const.INIT_RANGE)
Example #6
Source File: transforms.py From torchsupport with MIT License

def __call__(self, x):
    if not self.auto:
        # normalize each slice by its own statistics
        for idx in range(x.shape[0]):
            xmean = torch.mean(x[idx, :, :])
            xstd = torch.std(x[idx, :, :])
            if xstd == 0:
                # constant slice: zero it instead of dividing by zero
                x[idx, :, :] = 0.0
            else:
                x[idx, :, :] = (x[idx, :, :] - xmean) / xstd
    else:
        # update running per-slice mean and variance estimates
        view = x.view(x.shape[0], -1)
        mean = view.mean(dim=1)
        var = view.var(dim=1)
        self.var = var / (self.count + 1) + self.count / (self.count + 1) * self.var
        self.var += self.count / ((self.count + 1) ** 2) * (self.mean - mean) ** 2
        self.mean = (self.count * self.mean + mean) / (self.count + 1)
        for idx in range(x.shape[0]):
            if self.var[idx] == 0:
                x[idx, :, :] = 0.0
            else:
                x[idx, :, :] = (x[idx, :, :] - self.mean[idx]) / torch.sqrt(self.var[idx])
    return x
Example #7
Source File: model.py From sodeep with BSD 3-Clause Clear License

def comp(self, inpu):
    in_mat1 = torch.triu(inpu.repeat(inpu.size(0), 1), diagonal=1)
    in_mat2 = torch.triu(inpu.repeat(inpu.size(0), 1).t(), diagonal=1)

    comp_first = (in_mat1 - in_mat2)
    comp_second = (in_mat2 - in_mat1)

    std1 = torch.std(comp_first).item()
    std2 = torch.std(comp_second).item()

    comp_first = torch.sigmoid(comp_first * (6.8 / std1))
    comp_second = torch.sigmoid(comp_second * (6.8 / std2))

    comp_first = torch.triu(comp_first, diagonal=1)
    comp_second = torch.triu(comp_second, diagonal=1)

    return (torch.sum(comp_first, 1) + torch.sum(comp_second, 0) + 1) / inpu.size(0)
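Here torch.std() sets the scale of a sigmoid that turns pairwise score differences into soft comparisons, yielding a differentiable approximation of each element's rank. A sketch of the same idea on a plain 1-D tensor (soft_rank is our illustrative helper, not part of sodeep):

import torch

def soft_rank(scores):
    # diff[i, j] = scores[i] - scores[j]; sigmoid ~ P(element i beats element j)
    diff = scores.unsqueeze(1) - scores.unsqueeze(0)
    scale = 6.8 / torch.std(diff).item()
    wins = torch.sigmoid(diff * scale)
    # summing win probabilities over j gives a soft, normalized rank
    return wins.sum(dim=1) / scores.numel()

print(soft_rank(torch.tensor([0.1, 2.0, -1.0])))  # ~ [0.49, 0.83, 0.17]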
Example #8
Source File: parse_nk.py From self-attentive-parser with MIT License

def forward(self, z):
    if z.size(-1) == 1:
        return z

    mu = torch.mean(z, keepdim=True, dim=-1)
    sigma = torch.std(z, keepdim=True, dim=-1)
    ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
    if self.affine:
        ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

    # NOTE(nikita): the t2t code does the following instead, with eps=1e-6
    # However, I currently have no reason to believe that this difference in
    # implementation matters.
    # mu = torch.mean(z, keepdim=True, dim=-1)
    # variance = torch.mean((z - mu.expand_as(z))**2, keepdim=True, dim=-1)
    # ln_out = (z - mu.expand_as(z)) * torch.rsqrt(variance + self.eps).expand_as(z)
    # ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)
    return ln_out
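This is layer normalization written with torch.mean() and torch.std() over the last dimension. A standalone check of the core computation (the eps value here is arbitrary and the affine step is omitted):

import torch

z = torch.randn(2, 5, 8)
mu = torch.mean(z, keepdim=True, dim=-1)
sigma = torch.std(z, keepdim=True, dim=-1)
ln_out = (z - mu) / (sigma + 1e-6)
# every length-8 slice now has ~zero mean and ~unit std
print(ln_out.mean(dim=-1).abs().max().item(), ln_out.std(dim=-1).mean().item())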
Example #9
Source File: filters.py From pyfilter with MIT License

def test_SDE(self):
    def f(x, a, s):
        return -a * x

    def g(x, a, s):
        return s

    em = AffineEulerMaruyama((f, g), (0.02, 0.15), Normal(0., 1.), Normal(0., 1.),
                             dt=1e-2, num_steps=10)
    model = LinearGaussianObservations(em, scale=1e-3)

    x, y = model.sample_path(500)

    for filt in [SISR(model, 500, proposal=Bootstrap()), UKF(model)]:
        filt = filt.initialize().longfilter(y)

        means = filt.result.filter_means
        if isinstance(filt, UKF):
            means = means[:, 0]

        self.assertLess(torch.std(x - means), 5e-2)
Example #10
Source File: model.py From torch-light with MIT License

def reset_parameters(self):
    self.enc_ebd.weight.data.normal_(std=INIT_RANGE)
    self.seg_ebd.weight.data.normal_(std=INIT_RANGE)
    self.transform.weight.data.normal_(std=INIT_RANGE)
    self.transform.bias.data.zero_()
Example #11
Source File: model.py From torch-light with MIT License

def __init__(self, d_model):
    super().__init__()
    self.linear = nn.Linear(d_model, d_model)
    self.linear.weight.data.normal_(std=INIT_RANGE)
    self.linear.bias.data.zero_()
Example #12
Source File: model.py From torch-light with MIT License

def reset_parameters(self):
    self.w_qs.data.normal_(std=INIT_RANGE)
    self.w_ks.data.normal_(std=INIT_RANGE)
    self.w_vs.data.normal_(std=INIT_RANGE)
    self.w_o.weight.data.normal_(std=INIT_RANGE)
Example #13
Source File: architectures.py From affnet with MIT License

def input_norm(self, x):
    flat = x.view(x.size(0), -1)
    mp = torch.mean(flat, dim=1).detach()
    sp = torch.std(flat, dim=1).detach() + 1e-7
    return ((x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) /
            sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x))
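input_norm standardizes each sample using the mean and std of all its elements; .detach() keeps the statistics out of the gradient graph, and the 1e-7 avoids division by zero. The repeated unsqueeze/expand_as calls can be written more compactly with broadcasting (our simplification, assuming 4-D NCHW input):

import torch

def input_norm(x):
    flat = x.view(x.size(0), -1)
    mp = torch.mean(flat, dim=1).detach()
    sp = torch.std(flat, dim=1).detach() + 1e-7
    # reshape the per-sample stats to (B, 1, 1, 1) and let broadcasting expand them
    return (x - mp.view(-1, 1, 1, 1)) / sp.view(-1, 1, 1, 1)

x = torch.randn(8, 1, 32, 32)
print(input_norm(x).view(8, -1).std(dim=1))  # ~1.0 for every sample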
Example #14
Source File: model.py From torch-light with MIT License

def forward(self, input):
    mu = torch.mean(input, dim=-1, keepdim=True)
    sigma = torch.std(input, dim=-1, keepdim=True).clamp(min=self.eps)
    output = (input - mu) / sigma
    return output * self.gamma.expand_as(output) + self.beta.expand_as(output)
Example #15
Source File: architectures.py From affnet with MIT License

def input_norm(self, x):
    flat = x.view(x.size(0), -1)
    mp = torch.mean(flat, dim=1)
    sp = torch.std(flat, dim=1) + 1e-7
    return ((x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) /
            sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x))
Example #16
Source File: HardNet.py From affnet with MIT License

def input_norm(self, x):
    flat = x.view(x.size(0), -1)
    mp = torch.mean(flat, dim=1)
    sp = torch.std(flat, dim=1) + 1e-7
    return ((x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) /
            sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x))