Python chainer.functions.tanh() Examples
The following are 30 code examples of chainer.functions.tanh().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module chainer.functions, or try the search function.
Example #1
Source File: inference.py From chainer-gqn with MIT License | 6 votes |
def __call__(self, prev_hg, prev_he, prev_ce, x, v, r, u):
    """One step of the GQN inference-core ConvLSTM.

    Args:
        prev_hg: previous generator hidden state.
        prev_he: previous inference hidden state.
        prev_ce: previous inference cell state.
        x: observed frame.
        v: viewpoint vector.
        r: scene representation map.
        u: canvas accumulated by the generator.

    Returns:
        (next_h, next_c): updated inference hidden and cell states.
    """
    xu = cf.concat((x, u), axis=1)
    xu = self.downsample_xu(xu)
    v = self.broadcast_v(v)
    # A 1x1 representation map is spatially broadcast to match the others.
    if r.shape[2] == 1:
        r = self.broadcast_r(r)
    lstm_input = cf.concat((prev_he, prev_hg, xu, v, r), axis=1)
    gate_inputs = self.lstm(lstm_input)
    if self.use_cuda_kernel:
        # Fused CUDA kernel computes all gates and the cell update at once.
        next_h, next_c = CoreFunction()(gate_inputs, prev_ce)
    else:
        # Standard LSTM gating: split the 4 stacked gate pre-activations.
        forget_gate_input, input_gate_input, tanh_input, output_gate_input = cf.split_axis(
            gate_inputs, 4, axis=1)
        forget_gate = cf.sigmoid(forget_gate_input)
        input_gate = cf.sigmoid(input_gate_input)
        next_c = forget_gate * prev_ce + input_gate * cf.tanh(tanh_input)
        output_gate = cf.sigmoid(output_gate_input)
        next_h = output_gate * cf.tanh(next_c)
    return next_h, next_c
Example #2
Source File: language_modeling.py From vecto with Mozilla Public License 2.0 | 6 votes |
def __call__(self, x):
    """Forward pass dispatching on ``self.model_name``.

    NOTE(review): if ``model_name`` matches none of the handled values
    ('rnn', 'lstm', 'lr', '2FFNN'), ``y`` is never assigned and the final
    ``return y`` raises NameError — confirm callers always pass a valid name.
    """
    if self.model_name == 'rnn' or self.model_name == 'lstm':
        h0 = self.embed(x[:, self.window_size - 1])
        h1 = self.l1(h0)
        # h2 = self.l2(F.dropout(h1))
        y = self.l3(h1)
    if self.model_name == 'lr' or self.model_name == '2FFNN':
        h = self.embed(x)
        # Flatten the (batch, window, emb) embeddings into one vector per example.
        h = h.reshape((h.shape[0], h.shape[1] * h.shape[2]))
        if self.model_name == 'lr':
            y = self.lr(h)
        if self.model_name == '2FFNN':
            y = self.nn1(h)
            y = F.tanh(y)
            y = self.nn2(y)
    return y

# Dataset iterator to create a batch of sequences at different positions.
# This iterator returns a pair of current words and the next words. Each
# example is a part of sequences starting from the different offsets
# equally spaced within the whole sequence.
Example #3
Source File: bound_by_tanh.py From chainerrl with MIT License | 6 votes |
def bound_by_tanh(x, low, high):
    """Bound a given value into [low, high] by tanh.

    Args:
        x (chainer.Variable): value to bound
        low (numpy.ndarray): lower bound
        high (numpy.ndarray): upper bound
    Returns:
        chainer.Variable
    """
    assert isinstance(x, chainer.Variable)
    assert low is not None
    assert high is not None
    xp = cuda.get_array_module(x.array)
    # tanh squashes into (-1, 1); rescale and shift into (low, high).
    half_range = xp.expand_dims(xp.asarray((high - low) / 2), axis=0)
    midpoint = xp.expand_dims(xp.asarray((high + low) / 2), axis=0)
    return F.tanh(x) * half_range + midpoint
Example #4
Source File: diaresnet.py From imgclsmob with MIT License | 6 votes |
def __call__(self, x, h, c):
    """Run one step through the stacked LSTM cells.

    Args:
        x: input to the first cell; each cell's (dropped-out) hidden output
            feeds the next cell.
        h: list of per-cell previous hidden states.
        c: list of per-cell previous cell states.

    Returns:
        (hy, cy): lists of new hidden and cell states, one per cell.
    """
    hy = []
    cy = []
    for i, name in enumerate(self.x_amps.layer_names):
        hx_i = h[i]
        cx_i = c[i]
        # Gate pre-activations from input and recurrent projections.
        gates = self.x_amps[name](x) + self.h_amps[name](hx_i)
        i_gate, f_gate, c_gate, o_gate = F.split_axis(gates, indices_or_sections=4, axis=1)
        i_gate = F.sigmoid(i_gate)
        f_gate = F.sigmoid(f_gate)
        c_gate = F.tanh(c_gate)
        o_gate = F.sigmoid(o_gate)
        cy_i = (f_gate * cx_i) + (i_gate * c_gate)
        # NOTE(review): a standard LSTM uses tanh(cy_i) here; sigmoid may be
        # intentional in this architecture — confirm against the reference paper.
        hy_i = o_gate * F.sigmoid(cy_i)
        cy.append(cy_i)
        hy.append(hy_i)
        x = self.dropout(hy_i)
    return hy, cy
Example #5
Source File: spp_discriminator.py From Semantic-Segmentation-using-Adversarial-Networks with MIT License | 6 votes |
def __call__(self, x):
    """VGG-style discriminator: conv/pool stages, spatial pyramid pooling,
    then three fully-connected layers producing the raw score.
    """
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    # SPP yields a fixed-length vector regardless of input resolution.
    h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
    h = F.tanh(self.fc4(h))
    # NOTE(review): the `train=` keyword of F.dropout is Chainer v1 API;
    # v2+ controls this via chainer.using_config('train', ...) — confirm
    # the Chainer version this project targets.
    h = F.dropout(h, ratio=.5, train=self.train)
    h = F.tanh(self.fc5(h))
    h = F.dropout(h, ratio=.5, train=self.train)
    h = self.fc6(h)
    return h
Example #6
Source File: fsns.py From see with GNU General Public License v3.0 | 6 votes |
def attend(self, encoded_features):
    """Attention decoding over encoded feature maps.

    Runs ``self.num_labels`` attention steps: each step scores every
    encoded feature against the previous LSTM output, softmax-normalizes
    the scores, takes the weighted sum as the next LSTM input, and collects
    the LSTM outputs.

    Returns:
        list of LSTM outputs, one per label position.
    """
    self.out_lstm.reset_state()
    # Project each per-location feature, then stack along a new axis 1.
    transformed_encoded_features = F.concat([F.expand_dims(self.transform_encoded_features(feature), axis=1) for feature in encoded_features], axis=1)
    concat_encoded_features = F.concat([F.expand_dims(e, axis=1) for e in encoded_features], axis=1)
    # Initial "previous output" is all zeros.
    lstm_output = self.xp.zeros_like(encoded_features[0])
    outputs = []
    for _ in range(self.num_labels):
        transformed_lstm_output = self.transform_out_lstm_feature(lstm_output)
        attended_feats = []
        for transformed_encoded_feature in F.separate(transformed_encoded_features, axis=1):
            # Additive (Bahdanau-style) attention energy.
            attended_feat = transformed_encoded_feature + transformed_lstm_output
            attended_feat = F.tanh(attended_feat)
            attended_feats.append(self.generate_attended_feat(attended_feat))
        attended_feats = F.concat(attended_feats, axis=1)
        alphas = F.softmax(attended_feats, axis=1)
        # Weighted sum of the original (untransformed) features.
        lstm_input_feature = F.batch_matmul(alphas, concat_encoded_features, transa=True)
        lstm_input_feature = F.squeeze(lstm_input_feature, axis=1)
        lstm_output = self.out_lstm(lstm_input_feature)
        outputs.append(lstm_output)
    return outputs
Example #7
Source File: generator.py From chainer-gqn with MIT License | 6 votes |
def __call__(self, prev_hg, prev_cg, prev_z, v, r, prev_u):
    """One step of the GQN generator-core ConvLSTM.

    Args:
        prev_hg: previous generator hidden state.
        prev_cg: previous generator cell state.
        prev_z: latent sample for this step.
        v: viewpoint vector.
        r: scene representation map.
        prev_u: canvas accumulated so far.

    Returns:
        (next_h, next_c, next_u): new hidden state, cell state, and canvas.
    """
    v = self.broadcast_v(v)
    # A 1x1 representation map is spatially broadcast to match the others.
    if r.shape[2] == 1:
        r = self.broadcast_r(r)
    lstm_input = cf.concat((prev_hg, v, r, prev_z), axis=1)
    gate_inputs = self.lstm(lstm_input)
    # Standard LSTM gating over the 4 stacked gate pre-activations.
    forget_gate_input, input_gate_input, tanh_input, output_gate_input = cf.split_axis(
        gate_inputs, 4, axis=1)
    forget_gate = cf.sigmoid(forget_gate_input)
    input_gate = cf.sigmoid(input_gate_input)
    next_c = forget_gate * prev_cg + input_gate * cf.tanh(tanh_input)
    output_gate = cf.sigmoid(output_gate_input)
    next_h = output_gate * cf.tanh(next_c)
    # The canvas accumulates upsampled hidden states across steps.
    next_u = self.upsample_h(next_h) + prev_u
    return next_h, next_c, next_u
Example #8
Source File: mdn.py From models with MIT License | 6 votes |
def get_gaussian_params(self, x):
    """Compute mixture-density-network parameters from input ``x``.

    The second linear layer emits, per example: ``gaussian_mixtures``
    mixing logits, then ``gaussian_mixtures * input_dim`` means, then the
    same number of log-variances, sliced out in that order.

    Returns:
        (pi, mu, log_var): softmax-normalized mixing coefficients
        (n_batch, K), means (n_batch, K, input_dim), and log variances
        (n_batch, K, input_dim).
    """
    h = F.tanh(self.l1(x))
    h = self.l2(h)
    pi = h[:, :self.gaussian_mixtures]
    mu_var_dim = self.gaussian_mixtures * self.input_dim
    mu = h[:, self.gaussian_mixtures:self.gaussian_mixtures + mu_var_dim]
    log_var = h[:, self.gaussian_mixtures + mu_var_dim:]
    n_batch = x.shape[0]
    # mixing coefficients
    pi = F.reshape(pi, (n_batch, self.gaussian_mixtures))
    pi = F.softmax(pi, axis=1)
    # mean
    mu = F.reshape(mu, (n_batch, self.gaussian_mixtures, self.input_dim))
    # log variance
    log_var = F.reshape(
        log_var, (n_batch, self.gaussian_mixtures, self.input_dim))
    return pi, mu, log_var
Example #9
Source File: net.py From tensorboardX with MIT License | 5 votes |
def encode(self, x):
    """Encode ``x`` into Gaussian posterior parameters.

    Returns:
        (mu, ln_var): mean and log variance, i.e. log(sigma**2).
    """
    hidden = F.tanh(self.le1(x))
    return self.le2_mu(hidden), self.le2_ln_var(hidden)
Example #10
Source File: net.py From models with MIT License | 5 votes |
def _encode(self, xs):
    """Embed ``xs`` and produce (M, K)-shaped code logits.

    Returns:
        (logits, exs): log-softplus logits reshaped to (-1, M, K), and the
        raw embeddings.
    """
    embedded = self.embed_mat(xs)
    hidden = F.tanh(self.l1(embedded))
    raw = F.softplus(self.l2(hidden))
    # Epsilon keeps log() finite when softplus output underflows to 0.
    logits = F.log(raw + 1e-10).reshape(-1, self.M, self.K)
    return logits, embedded
Example #11
Source File: train_recursive_minibatch.py From pfio with MIT License | 5 votes |
def node(self, left, right):
    """Compose two child vectors into a parent: tanh(W [left; right])."""
    combined = F.concat((left, right))
    return F.tanh(self.l(combined))
Example #12
Source File: test_inout.py From chainer with MIT License | 5 votes |
def get_model(self, use_bn=False, out_type=None):
    """Build a small test chain: conv (+ optional BN) with tanh/sigmoid heads.

    Args:
        use_bn: insert a BatchNormalization layer after the convolution.
        out_type: 'dict', 'tuple', or 'list' — controls how the two
            outputs are packaged; any other value returns None implicitly.

    Returns:
        An instance of the locally-defined Model chain.
    """
    class Model(chainer.Chain):
        def __init__(self, use_bn=False, out_type=None):
            super(Model, self).__init__()
            self._use_bn = use_bn
            self._out_type = out_type
            with self.init_scope():
                self.conv = L.Convolution2D(None, 32, ksize=3, stride=1)
                if self._use_bn:
                    self.bn = L.BatchNormalization(32)

        def __call__(self, x):
            h = self.conv(x)
            if self._use_bn:
                h = self.bn(h)
            o1 = F.tanh(h)
            o2 = F.sigmoid(h)
            if self._out_type == 'dict':
                return {
                    'tanh': o1,
                    'sigmoid': o2
                }
            elif self._out_type == 'tuple':
                return o1, o2
            elif self._out_type == 'list':
                return [o1, o2]

    return Model(use_bn=use_bn, out_type=out_type)
Example #13
Source File: test_tanh.py From chainer with MIT License | 5 votes |
def forward(self):
    """Wrap the stored array in a Variable and apply tanh."""
    var = chainer.Variable(self.x)
    return functions.tanh(var)
Example #14
Source File: test_tanh.py From chainer with MIT License | 5 votes |
def forward(self, inputs, device):
    """Apply tanh to the single input and return a one-element tuple."""
    x, = inputs
    y = functions.tanh(x)
    return y,
Example #15
Source File: train_sentiment.py From pfio with MIT License | 5 votes |
def node(self, left, right):
    """Compose two child vectors into a parent: tanh(W [left; right])."""
    combined = F.concat((left, right))
    return F.tanh(self.l(combined))
Example #16
Source File: networks.py From EPG with MIT License | 5 votes |
def f(self, x):
    """MLP forward pass: tanh between layers, ``self._out_fn`` on the last."""
    last = len(self._lst_w) - 1
    for idx, (w, b) in enumerate(zip(self._lst_w, self._lst_b)):
        x = F.linear(x, w, b)
        if idx == last:
            # Final layer: apply the configured output function and stop.
            return self._out_fn(x)
        x = F.tanh(x)
Example #17
Source File: model.py From GP-GAN with MIT License | 5 votes |
def __call__(self, x):
    """Apply the tanh activation elementwise."""
    out = F.tanh(x)
    return out
Example #18
Source File: StatelessLSTM.py From chainer-compiler with MIT License | 5 votes |
def lstm_forward(c_prev, x):
    """One LSTM step from pre-activations packed in ``x``.

    Args:
        c_prev: previous cell state.
        x: stacked gate pre-activations (a, i, f, o).

    Returns:
        (c_next, h): new cell state and hidden state.
    """
    a, i, f, o = _extract_gates(x)
    batch = len(x)
    a = F.tanh(a)
    i = F.sigmoid(i)
    f = F.sigmoid(f)
    o = F.sigmoid(o)
    # Cell update, then gated hidden output.
    c_next = i * a + f * c_prev
    h = F.tanh(c_next) * o
    return c_next, h
Example #19
Source File: subword.py From vecto with Mozilla Public License 2.0 | 5 votes |
def __call__(self, tokenIdsList_merged, tokenIdsList_merged_b, argsort, argsort_reverse, pList):
    # input a list of token ids, output a list of word embeddings
    # NOTE(review): tokenIdsList_merged_b, argsort, argsort_reverse and
    # pList are unused in this body — confirm whether they are needed by
    # an overriding subclass or can be dropped from the signature.
    tokenIdsList_merged += 2
    input_emb = self.embed(tokenIdsList_merged)
    # input = input.reshape(input.shape[0], input.shape[1], input.shape[2])
    # (batch, len, emb) -> (batch, emb, len) for 1-D convolutions.
    input_emb = F.transpose(input_emb, (0, 2, 1))
    input_emb = F.dropout(input_emb, self.dropout)
    # print(input.shape)
    if 'small' in self.subword:
        # Single-width CNN variant: one conv + global max-pool over time.
        h = self.cnn1(input_emb)
        h = F.max(h, (2,))
    else:
        # Multi-width variant: seven convs, each max-pooled then concatenated.
        h1 = self.cnn1(input_emb)
        h1 = F.max(h1, (2,))
        h2 = self.cnn2(input_emb)
        h2 = F.max(h2, (2,))
        h3 = self.cnn3(input_emb)
        h3 = F.max(h3, (2,))
        h4 = self.cnn4(input_emb)
        h4 = F.max(h4, (2,))
        h5 = self.cnn5(input_emb)
        h5 = F.max(h5, (2,))
        h6 = self.cnn6(input_emb)
        h6 = F.max(h6, (2,))
        h7 = self.cnn7(input_emb)
        h7 = F.max(h7, (2,))
        h = F.concat((h1, h2, h3, h4, h5, h6, h7))
        h = F.dropout(h, self.dropout)
    h = F.tanh(h)
    y = self.out(h)
    # print(y.shape)
    e = y
    # Sum the n-gram embeddings belonging to each word.
    e = F.reshape(e, (int(e.shape[0] / self.n_ngram), self.n_ngram, e.shape[1]))
    e = F.sum(e, axis=1)
    return e
Example #20
Source File: net.py From tensorboardX with MIT License | 5 votes |
def decode(self, z, sigmoid=True):
    """Decode latent ``z``; apply a sigmoid to the output unless disabled."""
    hidden = F.tanh(self.ld1(z))
    out = self.ld2(hidden)
    return F.sigmoid(out) if sigmoid else out
Example #21
Source File: cn_models.py From cryptotrader with MIT License | 5 votes |
def __call__(self, x):
    """Run the vision backbone and the final convolution, squashed by tanh."""
    features = self.vision(x)
    features = self.conv(features)
    return F.tanh(features)
Example #22
Source File: modeling.py From models with MIT License | 5 votes |
def get_activation(activation_string):
    """Maps a string to a Python function, e.g., "relu" => `F.relu`.

    Args:
        activation_string: String name of the activation function.

    Returns:
        A Python function corresponding to the activation function. If
        `activation_string` is None, empty, or "linear", this will return
        F.identity. If `activation_string` is not a string, it will
        return `activation_string`.

    Raises:
        ValueError: The `activation_string` does not correspond to a known
        activation.
    """
    # Anything that's not a string is assumed to already be an activation
    # function and is passed through untouched.
    if not isinstance(activation_string, six.string_types):
        return activation_string
    if not activation_string:
        return F.identity
    act = activation_string.lower()
    known = {
        "linear": F.identity,
        "relu": F.relu,
        "gelu": gelu,
        "tanh": F.tanh,
    }
    if act not in known:
        raise ValueError("Unsupported activation: %s" % act)
    return known[act]
Example #23
Source File: voca.py From imgclsmob with MIT License | 5 votes |
def __call__(self, x, pid):
    """Forward pass conditioning audio features on a speaker id vector.

    The speaker vector ``pid`` is tiled across the audio window, concatenated
    with the normalized input, run through the conv branch, then concatenated
    again before the fully-connected head.
    """
    h = self.bn(x)
    h = F.swapaxes(h, axis1=1, axis2=3)
    # Expand pid to (batch, dim, 1, 1) and tile across the audio window.
    speaker = F.expand_dims(F.expand_dims(pid, axis=-1), axis=-1)
    speaker = F.tile(speaker, reps=(1, 1, self.audio_window_size, 1))
    h = F.concat((h, speaker), axis=1)
    h = self.branch(h)
    h = F.reshape(h, shape=(h.shape[0], -1))
    # Re-inject the raw speaker vector before the FC head.
    h = F.concat((h, pid), axis=1)
    h = F.tanh(self.fc1(h))
    h = self.fc2(h)
    return h
Example #24
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 5 votes |
def __call__(self, s, xs):
    """Calculate all hidden states, cell states, and output prediction.

    Args:
        s (~chainer.Variable or None): Initial (hidden, cell) states. If
            ``None`` is specified zero-vector is used.
        xs (list of ~chainer.Variable): List of input sequences.
            Each element ``xs[i]`` is a :class:`chainer.Variable` holding
            a sequence.
    Return:
        (hy, cy): a pair of hidden and cell states at the end of the
            sequence,
        y: a sequence of pre-activation vectors at the output layer
    """
    if len(xs) > 1:
        # Embed all sequences in one batch, then split back per sequence.
        sections = np.cumsum(np.array([len(x) for x in xs[:-1]], dtype=np.int32))
        xs = F.split_axis(self.embed(F.concat(xs, axis=0)), sections, axis=0)
    else:
        xs = [self.embed(xs[0])]
    if s is not None:
        hy, cy, ys = self.lstm(s[0], s[1], xs)
    else:
        hy, cy, ys = self.lstm(None, None, xs)
    # y = self.out(F.tanh(self.proj(F.concat(ys, axis=0))))
    y = self.out(self.proj(
        F.dropout(F.concat(ys, axis=0), ratio=self.dropout)))
    return (hy, cy), y

# interface for beam search
Example #25
Source File: EspNet_AttDot.py From chainer-compiler with MIT License | 5 votes |
def forward(self, enc_hs, dec_z, att_prev): '''AttDot forward :param enc_hs: :param dec_z: :param scaling: :return: ''' # EDIT(hamaji): scaling is now a local variable. scaling = 2.0 batch = len(enc_hs) # EDIT(momohatt): Make sure to initialize self.enc_h if self.enc_h is None: self.enc_h = F.pad_sequence(enc_hs) # utt x frame x hdim if self.pre_compute_enc_h is None: self.h_length = self.enc_h.shape[1] # utt x frame x att_dim self.pre_compute_enc_h = F.tanh( linear_tensor(self.mlp_enc, self.enc_h)) if dec_z is None: dec_z = chainer.Variable(self.xp.zeros( (batch, self.dunits), dtype=np.float32)) else: dec_z = F.reshape(dec_z, (batch, self.dunits)) # <phi (h_t), psi (s)> for all t u = F.broadcast_to(F.expand_dims(F.tanh(self.mlp_dec(dec_z)), 1), self.pre_compute_enc_h.shape) e = F.sum(self.pre_compute_enc_h * u, axis=2) # utt x frame # Applying a minus-large-number filter to make a probability value zero for a padded area # simply degrades the performance, and I gave up this implementation # Apply a scaling to make an attention sharp w = F.softmax(scaling * e) # weighted sum over flames # utt x hdim c = F.sum(self.enc_h * F.broadcast_to(F.expand_dims(w, 2), self.enc_h.shape), axis=1) return c, w
Example #26
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 5 votes |
def __call__(self, s, xs):
    """Calculate all hidden states, cell states, and output prediction.

    Args:
        s (~chainer.Variable or None): Initial (hidden, cell) states. If
            ``None`` is specified zero-vector is used.
        xs (list of ~chainer.Variable): List of input sequences.
            Each element ``xs[i]`` is a :class:`chainer.Variable` holding
            a sequence.
    Return:
        (hy, cy): a pair of hidden and cell states at the end of the
            sequence,
        y: a sequence of pre-activation vectors at the output layer
    """
    if len(xs) > 1:
        # Embed all sequences in one batch, then split back per sequence.
        sections = np.cumsum(np.array([len(x) for x in xs[:-1]], dtype=np.int32))
        xs = F.split_axis(self.embed(F.concat(xs, axis=0)), sections, axis=0)
    else:
        xs = [self.embed(xs[0])]
    if s is not None:
        hy, cy, ys = self.lstm(s[0], s[1], xs)
    else:
        hy, cy, ys = self.lstm(None, None, xs)
    # y = self.out(F.tanh(self.proj(F.concat(ys, axis=0))))
    y = self.out(self.proj(
        F.dropout(F.concat(ys, axis=0), ratio=self.dropout)))
    return (hy, cy), y

# interface for beam search
Example #27
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 5 votes |
def __call__(self, s, xs):
    """Calculate all hidden states, cell states, and output prediction.

    Args:
        s (~chainer.Variable or None): Initial (hidden, cell) states. If
            ``None`` is specified zero-vector is used.
        xs (list of ~chainer.Variable): List of input sequences.
            Each element ``xs[i]`` is a :class:`chainer.Variable` holding
            a sequence.
    Return:
        (hy, cy): a pair of hidden and cell states at the end of the
            sequence,
        y: a sequence of pre-activation vectors at the output layer
    """
    if len(xs) > 1:
        # Embed all sequences in one batch, then split back per sequence.
        sections = np.cumsum(np.array([len(x) for x in xs[:-1]], dtype=np.int32))
        xs = F.split_axis(self.embed(F.concat(xs, axis=0)), sections, axis=0)
    else:
        xs = [self.embed(xs[0])]
    if s is not None:
        hy, cy, ys = self.lstm(s[0], s[1], xs)
    else:
        hy, cy, ys = self.lstm(None, None, xs)
    # y = self.out(F.tanh(self.proj(F.concat(ys, axis=0))))
    y = self.out(self.proj(
        F.dropout(F.concat(ys, axis=0), ratio=self.dropout)))
    return (hy, cy), y

# interface for beam search
Example #28
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 5 votes |
def __call__(self, s, xs):
    """Calculate all hidden states, cell states, and output prediction.

    Args:
        s (~chainer.Variable or None): Initial (hidden, cell) states. If
            ``None`` is specified zero-vector is used.
        xs (list of ~chainer.Variable): List of input sequences.
            Each element ``xs[i]`` is a :class:`chainer.Variable` holding
            a sequence.
    Return:
        (hy, cy): a pair of hidden and cell states at the end of the
            sequence,
        y: a sequence of pre-activation vectors at the output layer
    """
    if len(xs) > 1:
        # Embed all sequences in one batch, then split back per sequence.
        sections = np.cumsum(np.array([len(x) for x in xs[:-1]], dtype=np.int32))
        xs = F.split_axis(self.embed(F.concat(xs, axis=0)), sections, axis=0)
    else:
        xs = [self.embed(xs[0])]
    if s is not None:
        hy, cy, ys = self.lstm(s[0], s[1], xs)
    else:
        hy, cy, ys = self.lstm(None, None, xs)
    # y = self.out(F.tanh(self.proj(F.concat(ys, axis=0))))
    y = self.out(self.proj(
        F.dropout(F.concat(ys, axis=0), ratio=self.dropout)))
    return (hy, cy), y

# interface for beam search
Example #29
Source File: MathMisc.py From chainer-compiler with MIT License | 5 votes |
def main():
    """Generate test cases for elementwise math ops on a fixed random input."""
    np.random.seed(314)
    x = np.random.rand(6, 4).astype(np.float32)
    s_int = np.array(-10)
    s_float = np.array(10.0)
    # (model class, subname) pairs, in the original emission order.
    array_cases = [
        (Sin, 'sin'),
        (Sinh, 'sinh'),
        (Sign, 'sign'),
        (Cos, 'cos'),
        (Cosh, 'cosh'),
        (Tan, 'tan'),
        (Tanh, 'tanh'),
        (ArcSin, 'arcsin'),
        (ArcCos, 'arccos'),
        (ArcTan, 'arctan'),
        (Exp, 'exp'),
        (Log, 'log'),
        (Clip, 'clip'),
        (ClipNp, 'clip_np'),
        (Abs, 'abs'),
        (AbsNp, 'abs_np'),
        (Sqrt, 'sqrt'),
        (Round, 'round'),
        (AbsBuiltin, 'abs_builtin'),
    ]
    for model_cls, subname in array_cases:
        testtools.generate_testcase(model_cls(), [x], subname=subname)
    # builtin abs() on scalar array inputs
    testtools.generate_testcase(AbsBuiltin(), [s_float], subname='abs_builtin_scalar_float')
    testtools.generate_testcase(AbsBuiltin(), [s_int], subname='abs_builtin_scalar_int')
Example #30
Source File: MathMisc.py From chainer-compiler with MIT License | 5 votes |
def forward(self, x):
    """Apply tanh elementwise to the input."""
    return F.tanh(x)