Python theano.sandbox.rng_mrg.MRG_RandomStreams() Examples
The following are 28 code examples of theano.sandbox.rng_mrg.MRG_RandomStreams().
The original project and source file are noted above each example.
You may also want to check out the other available functions and classes of the
module theano.sandbox.rng_mrg.
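Before the examples, here is a minimal, self-contained usage sketch (not taken from any of the projects below) showing the pattern most of them share: seed a stream, build symbolic sampling expressions, and compile them into a function.

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1234)           # seeded stream, for reproducibility
u = srng.uniform(size=(2, 3))                 # symbolic uniform draws in [0, 1)
n = srng.normal(size=(2, 3), avg=0., std=1.)  # symbolic Gaussian draws
f = theano.function([], [u, n])               # state updates are attached automatically
u_vals, n_vals = f()                          # each call advances the stream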
Example #1
Source File: ff_layers.py From GroundHog with BSD 3-Clause "New" or "Revised" License

def __init__(self, rng, std=0.1, ndim=0, avg=0, shape_fn=None):
    """ """
    assert rng is not None, "random number generator should not be empty!"
    super(GaussianNoise, self).__init__(0, 0, rng)
    self.std = std
    self.avg = avg
    self.ndim = ndim
    self.shape_fn = shape_fn
    if self.shape_fn:
        # Name is not important as it is not a parameter of the model
        self.noise_term = theano.shared(numpy.zeros((2,) * ndim,
                                                    dtype=theano.config.floatX),
                                        name='ndata')
        self.noise_params += [self.noise_term]
        self.noise_params_shape_fn += [shape_fn]
    self.trng = RandomStreams(rng.randint(1e5))
Example #2
Source File: test_multinomial_wo_replacement.py From D-VAE with MIT License

def test_select_distinct(self):
    """
    Tests that multinomial_wo_replacement always selects distinct elements
    """
    th_rng = RandomStreams(12345)
    p = tensor.fmatrix()
    n = tensor.iscalar()
    m = th_rng.multinomial_wo_replacement(pvals=p, n=n)
    f = function([p, n], m, allow_input_downcast=True)

    n_elements = 1000
    all_indices = range(n_elements)
    numpy.random.seed(12345)
    for i in [5, 10, 50, 100, 500, n_elements]:
        pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        res = f(pvals, i)
        res = numpy.squeeze(res)
        assert len(res) == i
        assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
Example #3
Source File: test_multinomial_wo_replacement.py From D-VAE with MIT License

def test_fail_select_alot(self):
    """
    Tests that multinomial_wo_replacement fails when asked to sample more
    elements than the actual number of elements
    """
    th_rng = RandomStreams(12345)
    p = tensor.fmatrix()
    n = tensor.iscalar()
    m = th_rng.multinomial_wo_replacement(pvals=p, n=n)
    f = function([p, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 200
    numpy.random.seed(12345)
    pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    self.assertRaises(ValueError, f, pvals, n_selected)
Example #4
Source File: optimizers.py From seq2seq-keyphrase with MIT License

def __init__(self, lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-8,
             save=False, rng=None, *args, **kwargs):
    print('args=%s' % str(args))
    print('kwargs=%s' % str(kwargs))
    super(Adam, self).__init__(**kwargs)
    self.__dict__.update(locals())
    print(locals())
    # if 'iterations' in kwargs:
    #     print('iterations=%s' % str(kwargs['iterations']))
    #     self.iterations = shared_scalar(kwargs['iterations'], name='iteration')
    # else:
    #     print('iterations not set')
    #     self.iterations = shared_scalar(0, name='iteration')
    self.iterations = shared_scalar(0, name='iteration')
    self.lr = shared_scalar(lr, name='lr')
    # self.rng = MRG_RandomStreams(use_cuda=True)
    self.noise = []
    self.forget = dict()
    # self.rng = rng
    self.beta_1 = beta_1
    self.beta_2 = beta_2
    self.epsilon = epsilon
    self.add(self.iterations)
    self.add(self.lr)
Example #5
Source File: ff_layers.py From LV_groundhog with BSD 3-Clause "New" or "Revised" License

def __init__(self, rng, std=0.1, ndim=0, avg=0, shape_fn=None):
    """ """
    assert rng is not None, "random number generator should not be empty!"
    super(GaussianNoise, self).__init__(0, 0, rng)
    self.std = std
    self.avg = avg
    self.ndim = ndim
    self.shape_fn = shape_fn
    if self.shape_fn:
        # Name is not important as it is not a parameter of the model
        self.noise_term = theano.shared(numpy.zeros((2,) * ndim,
                                                    dtype=theano.config.floatX),
                                        name='ndata')
        self.noise_params += [self.noise_term]
        self.noise_params_shape_fn += [shape_fn]
    self.trng = RandomStreams(rng.randint(1e5))
Example #6
Source File: test_rng_mrg.py From D-VAE with MIT License

def test_random_state_transfer():
    """
    Test that random state can be transferred from one theano graph
    to another.
    """
    class Graph:
        def __init__(self, seed=123):
            self.rng = MRG_RandomStreams(seed)
            self.y = self.rng.uniform(size=(1,))
    g1 = Graph(seed=123)
    f1 = theano.function([], g1.y)
    g2 = Graph(seed=987)
    f2 = theano.function([], g2.y)

    g2.rng.rstate = g1.rng.rstate
    for (su1, su2) in zip(g1.rng.state_updates, g2.rng.state_updates):
        su2[0].set_value(su1[0].get_value())

    numpy.testing.assert_array_almost_equal(f1(), f2(), decimal=6)
Example #7
Source File: test_multinomial_wo_replacement.py From attention-lvcsr with MIT License

def test_select_distinct(self):
    """
    Tests that multinomial_wo_replacement always selects distinct elements
    """
    th_rng = RandomStreams(12345)
    p = tensor.fmatrix()
    n = tensor.iscalar()
    m = th_rng.multinomial_wo_replacement(pvals=p, n=n)
    f = function([p, n], m, allow_input_downcast=True)

    n_elements = 1000
    all_indices = range(n_elements)
    numpy.random.seed(12345)
    for i in [5, 10, 50, 100, 500, n_elements]:
        pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        res = f(pvals, i)
        res = numpy.squeeze(res)
        assert len(res) == i
        assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
Example #8
Source File: test_multinomial_wo_replacement.py From attention-lvcsr with MIT License

def test_fail_select_alot(self):
    """
    Tests that multinomial_wo_replacement fails when asked to sample more
    elements than the actual number of elements
    """
    th_rng = RandomStreams(12345)
    p = tensor.fmatrix()
    n = tensor.iscalar()
    m = th_rng.multinomial_wo_replacement(pvals=p, n=n)
    f = function([p, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 200
    numpy.random.seed(12345)
    pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    self.assertRaises(ValueError, f, pvals, n_selected)
Example #9
Source File: test_rng_mrg.py From attention-lvcsr with MIT License

def test_deterministic():
    seed = utt.fetch_seed()
    sample_size = (10, 20)

    test_use_cuda = [False]
    if cuda_available:
        test_use_cuda.append(True)
    for use_cuda in test_use_cuda:
        # print 'use_cuda =', use_cuda
        R = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
        u = R.uniform(size=sample_size)
        f = theano.function([], u)

        fsample1 = f()
        fsample2 = f()
        assert not numpy.allclose(fsample1, fsample2)

        R2 = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
        u2 = R2.uniform(size=sample_size)
        g = theano.function([], u2)
        gsample1 = g()
        gsample2 = g()
        assert numpy.allclose(fsample1, gsample1)
        assert numpy.allclose(fsample2, gsample2)
Example #10
Source File: transform_rnn.py From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License

def call(self, x, training=None):
    deta1 = 0.3
    deta2 = 0.3
    deta3 = 0.3
    seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    theta1 = rng.uniform(size=(x.shape[0], 1), low=-deta1, high=deta1, dtype='float32')
    theta2 = rng.uniform(size=(x.shape[0], 1), low=-deta2, high=deta2, dtype='float32')
    theta3 = rng.uniform(size=(x.shape[0], 1), low=-deta3, high=deta3, dtype='float32')
    theta = K.concatenate([theta1, theta2, theta3], axis=-1)
    theta = K.tile(theta, x.shape[1])
    theta = theta.reshape((x.shape[0], x.shape[1], 3))
    theta = theta.reshape((theta.shape[0] * theta.shape[1], theta.shape[2]))
    M = _fusion(theta)
    output = _transform_rot(M, x)
    return K.in_train_phase(output, x, training=training)
Example #11
Source File: test_rng_mrg.py From attention-lvcsr with MIT License

def test_GPU_nstreams_limit():
    """
    Verify that a ValueError is raised when n_streams
    is greater than 2**20 on GPU. This is the value of
    (NUM_VECTOR_OP_THREADS_PER_BLOCK * NUM_VECTOR_OP_BLOCKS).
    """
    if not cuda_available:
        raise SkipTest('Optional package cuda not available')

    seed = 12345
    R = MRG_RandomStreams(seed=seed, use_cuda=True)

    def eval_uniform(size, nstreams):
        if theano.config.mode == "FAST_COMPILE":
            mode = "FAST_RUN"
        else:
            mode = copy.copy(theano.compile.get_default_mode())
            mode.check_py_code = False
        out = R.uniform(size=size, nstreams=nstreams, dtype='float32')
        f = theano.function([], out, mode=mode)
        return f()

    eval_uniform((10,), 2**20)
    assert_raises(ValueError, eval_uniform, (10,), 2**20 + 1)
Example #12
Source File: nn.py From deligan with MIT License

def __init__(self, incoming, sigma=0.1, **kwargs):
    super(GaussianNoiseLayer, self).__init__(incoming, **kwargs)
    self._srng = RandomStreams(lasagne.random.get_rng().randint(1, 2147462579))
    self.sigma = sigma
Example #13
Source File: test_pkl_utils.py From attention-lvcsr with MIT License

def test_dump_load_mrg(self):
    rng = MRG_RandomStreams(use_cuda=cuda_ndarray.cuda_enabled)

    with open('test', 'wb') as f:
        dump(rng, f)
    with open('test', 'rb') as f:
        rng = load(f)

    assert type(rng) == MRG_RandomStreams
Example #14
Source File: main.py From attention-lvcsr with MIT License

def add_exploration(recognizer, data, train_conf):
    prediction = None
    prediction_mask = None
    explore_conf = train_conf.get('exploration', 'imitative')
    if explore_conf in ['greedy', 'mixed']:
        length_expand = 10
        prediction = recognizer.get_generate_graph(
            n_steps=recognizer.labels.shape[0] + length_expand)['outputs']
        prediction_mask = tensor.lt(
            tensor.cumsum(tensor.eq(prediction, data.eos_label), axis=0),
            1).astype(floatX)
        prediction_mask = tensor.roll(prediction_mask, 1, 0)
        prediction_mask = tensor.set_subtensor(
            prediction_mask[0, :], tensor.ones_like(prediction_mask[0, :]))

        if explore_conf == 'mixed':
            batch_size = recognizer.labels.shape[1]
            targets = tensor.concatenate([
                recognizer.labels,
                tensor.zeros((length_expand, batch_size), dtype='int64')])
            targets_mask = tensor.concatenate([
                recognizer.labels_mask,
                tensor.zeros((length_expand, batch_size), dtype=floatX)])
            rng = MRG_RandomStreams()
            generate = rng.binomial((batch_size,), p=0.5, dtype='int64')
            prediction = (generate[None, :] * prediction +
                          (1 - generate[None, :]) * targets)
            prediction_mask = (tensor.cast(generate[None, :] * prediction_mask,
                                           floatX) +
                               tensor.cast((1 - generate[None, :]) * targets_mask,
                                           floatX))

        prediction_mask = theano.gradient.disconnected_grad(prediction_mask)
    elif explore_conf != 'imitative':
        raise ValueError
    return prediction, prediction_mask
Example #15
Source File: basic.py From LV_groundhog with BSD 3-Clause "New" or "Revised" License

def __init__(self, output_layer,
             sample_fn,
             indx_word="/data/lisa/data/PennTreebankCorpus/dictionaries.npz",
             indx_word_src=None,
             rng=None):
    super(Model, self).__init__()
    if rng == None:
        rng = numpy.random.RandomState(123)
    assert hasattr(output_layer, 'grads'), \
        'The model needs to have gradients defined'
    self.rng = rng
    self.trng = RandomStreams(rng.randint(1000) + 1)
    self.sample_fn = sample_fn
    self.indx_word = indx_word
    self.indx_word_src = indx_word_src
    self.param_grads = output_layer.grads
    self.params = output_layer.params
    self.updates = output_layer.updates
    self.noise_params = output_layer.noise_params
    self.noise_params_shape_fn = output_layer.noise_params_shape_fn
    self.inputs = output_layer.inputs
    self.params_grad_scale = output_layer.params_grad_scale
    self.train_cost = output_layer.cost
    self.out = output_layer.out
    self.schedules = output_layer.schedules
    self.output_layer = output_layer
    self.properties = output_layer.properties
    self._get_samples = output_layer._get_samples
Example #16
Source File: test_rng_mrg.py From attention-lvcsr with MIT License

def test_consistency_cpu_serial():
    """
    Verify that the random numbers generated by mrg_uniform, serially,
    are the same as the reference (Java) implementation by L'Ecuyer et al.
    """
    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7

    samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')

    for i in range(n_streams):
        stream_rstate = curr_rstate.copy()
        for j in range(n_substreams):
            rstate = theano.shared(numpy.array([stream_rstate.copy()],
                                               dtype='int32'))
            new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
                                                         dtype=config.floatX,
                                                         size=(1,))
            # Not really necessary, just mimicking
            # rng_mrg.MRG_RandomStreams' behavior
            sample.rstate = rstate
            sample.update = (rstate, new_rstate)

            rstate.default_update = new_rstate
            f = theano.function([], sample)
            for k in range(n_samples):
                s = f()
                samples.append(s)
            # next substream
            stream_rstate = rng_mrg.ff_2p72(stream_rstate)
        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)

    samples = numpy.array(samples).flatten()
    assert(numpy.allclose(samples, java_samples))
Example #17
Source File: test_rng_curand.py From attention-lvcsr with MIT License

def compare_speed():
    # To run this speed comparison
    #     cd <directory of this file>
    #     THEANO_FLAGS=device=gpu \
    #       python -c 'import test_rng_curand; test_rng_curand.compare_speed()'
    mrg = MRG_RandomStreams()
    crn = CURAND_RandomStreams(234)
    N = 1000 * 100

    dest = theano.shared(numpy.zeros(N, dtype=theano.config.floatX))
    mrg_u = theano.function([], [], updates={dest: mrg.uniform((N,))},
                            profile='mrg uniform')
    crn_u = theano.function([], [], updates={dest: crn.uniform((N,))},
                            profile='crn uniform')
    mrg_n = theano.function([], [], updates={dest: mrg.normal((N,))},
                            profile='mrg normal')
    crn_n = theano.function([], [], updates={dest: crn.normal((N,))},
                            profile='crn normal')

    for f in mrg_u, crn_u, mrg_n, crn_n:
        print('DEBUGPRINT')
        print('----------')
        theano.printing.debugprint(f)

    for i in range(100):
        for f in mrg_u, crn_u, mrg_n, crn_n:
            # don't time the first call, it has some startup cost
            f.fn.time_thunks = (i > 0)
            f()
Example #18
Source File: test_rng_mrg.py From attention-lvcsr with MIT License

def t_binomial(mean, size, const_size, var_input, input, steps, rtol):
    R = MRG_RandomStreams(234, use_cuda=False)
    u = R.binomial(size=size, p=mean)
    f = theano.function(var_input, u, mode=mode)
    out = f(*input)

    # Increase the number of steps if sizes implies only a few samples
    if numpy.prod(const_size) < 10:
        steps_ = steps * 100
    else:
        steps_ = steps
    basictest(f, steps_, const_size, prefix='mrg cpu',
              inputs=input, allow_01=True,
              target_avg=mean, mean_rtol=rtol)

    if mode != 'FAST_COMPILE' and cuda_available:
        R = MRG_RandomStreams(234, use_cuda=True)
        u = R.binomial(size=size, p=mean, dtype='float32')
        # well, it's really that this test w GPU doesn't make sense otw
        assert u.dtype == 'float32'
        f = theano.function(var_input, theano.Out(
            theano.sandbox.cuda.basic_ops.gpu_from_host(u),
            borrow=True), mode=mode_with_gpu)
        gpu_out = numpy.asarray(f(*input))

        basictest(f, steps_, const_size, prefix='mrg gpu',
                  inputs=input, allow_01=True,
                  target_avg=mean, mean_rtol=rtol)
        numpy.testing.assert_array_almost_equal(out, gpu_out, decimal=6)

    RR = theano.tensor.shared_randomstreams.RandomStreams(234)
    uu = RR.binomial(size=size, p=mean)
    ff = theano.function(var_input, uu, mode=mode)
    # It's not our problem if numpy generates 0 or 1
    basictest(ff, steps_, const_size, prefix='numpy',
              allow_01=True, inputs=input,
              target_avg=mean, mean_rtol=rtol)
Example #19
Source File: rng.py From dcgan_code with MIT License

def set_seed(n):
    global seed, py_rng, np_rng, t_rng

    seed = n
    py_rng = Random(seed)
    np_rng = RandomState(seed)
    t_rng = RandomStreams(seed)
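One note on using this helper: because set_seed rebinds module-level globals, downstream code should access the generators through the module rather than via from-imports, which would keep stale references. A hypothetical usage sketch (the module name rng is assumed from the file name above):

import rng

rng.set_seed(42)
print(rng.py_rng.random())              # Python stdlib generator
print(rng.np_rng.rand(3))               # NumPy RandomState
sample = rng.t_rng.uniform(size=(2,))   # symbolic Theano expression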
Example #20
Source File: ff_layers.py From LV_groundhog with BSD 3-Clause "New" or "Revised" License

def __init__(self, rng=None, name=None, dropout=1.):
    super(DropOp, self).__init__(0, 0, None, name)
    self.dropout = dropout
    if dropout < 1.:
        self.trng = RandomStreams(rng.randint(1e5))
Example #21
Source File: custom_layers.py From luna16 with BSD 2-Clause "Simplified" License

def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
    super(SpatialDropoutLayer, self).__init__(incoming, **kwargs)
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))
    self.p = p
    self.rescale = rescale
Example #22
Source File: dropout.py From LasagneNLP with Apache License 2.0

def __init__(self, incoming, sigma=1.0, **kwargs):
    super(GaussianDropoutLayer, self).__init__(incoming, **kwargs)
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))
    self.sigma = sigma
Example #23
Source File: dialog_encdec.py From hred-latent-piecewise with GNU General Public License v3.0

def __init__(self, state, rng, parent, dialog_encoder, word_embedding_param):
    EncoderDecoderBase.__init__(self, state, rng, parent)

    # Take as input the encoder instance for the embeddings..
    # To modify in the future
    assert(word_embedding_param != None)
    self.word_embedding_param = word_embedding_param
    self.dialog_encoder = dialog_encoder
    self.trng = MRG_RandomStreams(self.seed)
    self.init_params()
Example #24
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License

def variable(value, dtype=None, name=None, constraint=None):
    """Instantiates a variable and returns it.

    # Arguments
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.
        constraint: Optional projection function to be
            applied to the variable after an optimizer update.

    # Returns
        A variable instance (with Keras metadata included).
    """
    if dtype is None:
        dtype = floatx()
    if hasattr(value, 'tocoo'):
        _assert_sparse_module()
        variable = th_sparse_module.as_sparse_variable(
            value, name=_prepare_name(name, 'variable'))
    else:
        if isinstance(value, (theano.tensor.TensorVariable,
                              theano.tensor.sharedvar.TensorSharedVariable,
                              theano.tensor.TensorConstant)):
            # Support for RandomStreams().normal(), .uniform().
            value = value.eval()
        value = np.asarray(value, dtype=dtype)
        variable = theano.shared(value=value,
                                 name=_prepare_name(name, 'variable'),
                                 strict=False)
    variable._keras_shape = value.shape
    variable._uses_learning_phase = False
    variable.constraint = constraint
    return variable
Example #25
Source File: basic.py From GroundHog with BSD 3-Clause "New" or "Revised" License

def __init__(self, output_layer,
             sample_fn,
             indx_word="/data/lisa/data/PennTreebankCorpus/dictionaries.npz",
             indx_word_src=None,
             rng=None):
    super(Model, self).__init__()
    if rng == None:
        rng = numpy.random.RandomState(123)
    assert hasattr(output_layer, 'grads'), \
        'The model needs to have gradients defined'
    self.rng = rng
    self.trng = RandomStreams(rng.randint(1000) + 1)
    self.sample_fn = sample_fn
    self.indx_word = indx_word
    self.indx_word_src = indx_word_src
    self.param_grads = output_layer.grads
    self.params = output_layer.params
    self.updates = output_layer.updates
    self.noise_params = output_layer.noise_params
    self.noise_params_shape_fn = output_layer.noise_params_shape_fn
    self.inputs = output_layer.inputs
    self.params_grad_scale = output_layer.params_grad_scale
    self.train_cost = output_layer.cost
    self.out = output_layer.out
    self.schedules = output_layer.schedules
    self.output_layer = output_layer
    self.properties = output_layer.properties
    self._get_samples = output_layer._get_samples
Example #26
Source File: ff_layers.py From GroundHog with BSD 3-Clause "New" or "Revised" License

def __init__(self, rng=None, name=None, dropout=1.):
    super(DropOp, self).__init__(0, 0, None, name)
    self.dropout = dropout
    if dropout < 1.:
        self.trng = RandomStreams(rng.randint(1e5))
Example #27
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0

def dropout(x, level, noise_shape=None, seed=None):
    '''Sets entries in `x` to zero at random,
    while scaling the entire tensor.

    # Arguments
        x: tensor
        level: fraction of the entries in the tensor
            that will be set to 0.
        noise_shape: shape for randomly generated keep/drop flags,
            must be broadcastable to the shape of `x`
        seed: random seed to ensure determinism.
    '''
    if level < 0. or level >= 1:
        raise ValueError('Dropout level must be in interval [0, 1[.')
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    retain_prob = 1. - level

    if noise_shape is None:
        random_tensor = rng.binomial(x.shape, p=retain_prob, dtype=x.dtype)
    else:
        random_tensor = rng.binomial(noise_shape, p=retain_prob, dtype=x.dtype)
        random_tensor = T.patternbroadcast(random_tensor,
                                           [dim == 1 for dim in noise_shape])
    x *= random_tensor
    x /= retain_prob
    return x
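As a quick illustration of how this backend helper would be called, here is a hypothetical sketch; it assumes the module-level imports the function body relies on (np, T, RandomStreams) are in scope:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
y = dropout(x, level=0.5, seed=42)   # roughly half the entries zeroed, survivors scaled by 2
f = theano.function([x], y)
print(f(np.ones((4, 4), dtype=theano.config.floatX)))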
Example #28
Source File: attacks_th.py From robust_physical_perturbations with MIT License

def vatm(model, x, predictions, eps, num_iterations=1, xi=1e-6,
         clip_min=None, clip_max=None, seed=12345):
    """
    Theano implementation of the perturbation method used for virtual
    adversarial training: https://arxiv.org/abs/1507.00677

    :param model: the model which returns the network unnormalized logits
    :param x: the input placeholder
    :param predictions: the model's unnormalized output tensor
    :param eps: the epsilon (input variation parameter)
    :param num_iterations: the number of iterations
    :param xi: the finite difference parameter
    :param clip_min: optional parameter that can be used to set a minimum
                     value for components of the example returned
    :param clip_max: optional parameter that can be used to set a maximum
                     value for components of the example returned
    :param seed: the seed for random generator
    :return: a tensor for the adversarial example
    """
    eps = np.asarray(eps, dtype=floatX)
    xi = np.asarray(xi, dtype=floatX)
    rng = RandomStreams(seed=seed)
    d = rng.normal(size=x.shape, dtype=x.dtype)
    for i in range(num_iterations):
        d = xi * utils_th.l2_batch_normalize(d)
        logits_d = model(x + d)
        kl = utils_th.kl_with_logits(predictions, logits_d)
        Hd = T.grad(kl.sum(), d)
        d = gradient.disconnected_grad(Hd)
    d = eps * utils_th.l2_batch_normalize(d)
    adv_x = gradient.disconnected_grad(x + d)
    if (clip_min is not None) and (clip_max is not None):
        adv_x = T.clip(adv_x, clip_min, clip_max)
    return adv_x