Python keras.backend.random_binomial() Examples
The following are 8 code examples of keras.backend.random_binomial(), drawn from open-source projects. Each example notes its source file, project, and license.
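Before the project examples, here is a minimal sketch of the call itself, assuming an older Keras release where keras.backend.random_binomial is still available (later tf.keras versions deprecate it in favor of random_bernoulli). Each element of the result is drawn independently: 1 with probability p, 0 otherwise:

from keras import backend as K

# Each element is an independent Bernoulli draw: 1 with probability p, else 0.
sample = K.random_binomial(shape=(2, 3), p=0.3, dtype=K.floatx(), seed=42)
print(K.eval(sample))  # e.g. [[0. 0. 1.], [0. 1. 0.]]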
Example #1
Source File: layers.py From keras_bn_library with MIT License
def call(self, x, mask=None):
    if self.mode == 'maximum_likelihood':
        # draw maximum likelihood sample from Bernoulli distribution
        #   x* = argmax_x p(x) = 1  if p(x=1) >= 0.5
        #                        0  otherwise
        return K.round(x)
    elif self.mode == 'random':
        # draw random sample from Bernoulli distribution
        #   x* = x ~ p(x) = 1  if p(x=1) > uniform(0, 1)
        #                   0  otherwise
        #return self.srng.binomial(size=x.shape, n=1, p=x, dtype=K.floatx())
        return K.random_binomial(x.shape, p=x, dtype=K.floatx())
    elif self.mode == 'mean_field':
        # draw mean-field approximation sample from Bernoulli distribution
        #   x* = E[p(x)] = E[Bern(x; p)] = p
        return x
    elif self.mode == 'nrlu':
        return nrlu(x)
    else:
        raise NotImplementedError('Unknown sample mode!')
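A quick sanity check on the 'random' branch above (a sketch, not part of the original layer): averaging many Bernoulli draws from K.random_binomial should approach the probability p, which is exactly what the 'mean_field' branch returns directly.

import numpy as np
from keras import backend as K

# The mean of many Bernoulli(0.7) draws should be close to 0.7.
p = K.constant(np.full((10000,), 0.7, dtype=np.float32))
draws = K.eval(K.random_binomial((10000,), p=p, dtype=K.floatx()))
print(draws.mean())  # ~0.7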
Example #2
Source File: rbm.py From keras_bn_library with MIT License
def sample_h_given_x(self, x):
    h_pre = K.dot(x, self.Wrbm) + self.bh
    h_sigm = self.activation(self.scaling_h_given_x * h_pre)

    # drop out noise
    #if(0.0 < self.p < 1.0):
    #    noise_shape = self._get_noise_shape(h_sigm)
    #    h_sigm = K.in_train_phase(K.dropout(h_sigm, self.p, noise_shape), h_sigm)

    if(self.hidden_unit_type == 'binary'):
        h_samp = K.random_binomial(shape=h_sigm.shape, p=h_sigm)
        # random sample
        #   \hat{h} = 1,  if p(h=1|x) > uniform(0, 1)
        #             0,  otherwise
    elif(self.hidden_unit_type == 'nrlu'):
        h_samp = nrlu(h_pre)
    else:
        h_samp = h_sigm

    if(0.0 < self.p < 1.0):
        noise_shape = self._get_noise_shape(h_samp)
        h_samp = K.in_train_phase(K.dropout(h_samp, self.p, noise_shape), h_samp)

    return h_samp, h_pre, h_sigm
Example #3
Source File: rbm.py From keras_bn_library with MIT License
def sample_x_given_h(self, h):
    x_pre = K.dot(h, self.Wrbm.T) + self.bx
    if(self.visible_unit_type == 'gaussian'):
        x_samp = self.scaling_x_given_h * x_pre
        return x_samp, x_samp, x_samp
    else:
        x_sigm = K.sigmoid(self.scaling_x_given_h * x_pre)
        x_samp = K.random_binomial(shape=x_sigm.shape, p=x_sigm)
        return x_samp, x_pre, x_sigm
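Examples #2 and #3 are the two halves of an RBM Gibbs step. Below is a self-contained sketch of one up-down pass built the same way; the weights, biases, and shapes are random placeholders for illustration, not the repo's trained parameters.

import numpy as np
from keras import backend as K

W = K.constant(0.1 * np.random.randn(6, 3).astype('float32'))    # visible x hidden
bh = K.constant(np.zeros(3, dtype='float32'))                    # hidden bias
bx = K.constant(np.zeros(6, dtype='float32'))                    # visible bias
x0 = K.constant((np.random.rand(4, 6) > 0.5).astype('float32'))  # binary batch

# Up pass: sample binary hidden units from p(h=1|x).
h = K.random_binomial((4, 3), p=K.sigmoid(K.dot(x0, W) + bh))
# Down pass: sample binary visible units from p(x=1|h).
x1 = K.random_binomial((4, 6), p=K.sigmoid(K.dot(h, K.transpose(W)) + bx))
print(K.eval(x1))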
Example #4
Source File: layers.py From Keras-progressive_growing_of_gans with MIT License
def call(self, input, deterministic=False, **kwargs):
    if self.gain is not None:
        input = input * self.gain
    if deterministic or not self.strength:
        return input

    in_shape = self.input_shape
    in_axes = range(len(in_shape))
    in_shape = [in_shape[axis] if in_shape[axis] is not None else input.shape[axis]
                for axis in in_axes]  # None => Theano expr
    rnd_shape = [in_shape[axis] for axis in self.axes]
    broadcast = [self.axes.index(axis) if axis in self.axes else 'x' for axis in in_axes]
    one = K.constant(1)

    if self.mode == 'drop':
        p = one - self.strength
        rnd = K.random_binomial(tuple(rnd_shape), p=p, dtype=input.dtype) / p
    elif self.mode == 'mul':
        rnd = (one + self.strength) ** K.random_normal(tuple(rnd_shape), dtype=input.dtype)
    elif self.mode == 'prop':
        coef = self.strength * K.constant(np.sqrt(np.float32(self.input_shape[1])))
        rnd = K.random_normal(tuple(rnd_shape), dtype=input.dtype) * coef + one
    else:
        raise ValueError('Invalid GDropLayer mode', self.mode)

    if self.normalize:
        rnd = rnd / K.sqrt(K.mean(rnd ** 2, axis=3, keepdims=True))
    return input * K.permute_dimensions(rnd, broadcast)
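The 'drop' branch above is inverted dropout built directly from random_binomial: keep each activation with probability 1 - strength, then rescale by 1/(1 - strength) so the expected activation is unchanged. A minimal sketch of just that branch (the 4D shape and the strength value are assumptions):

import numpy as np
from keras import backend as K

strength = 0.2
keep_p = 1.0 - strength
x = K.constant(np.ones((1, 4, 4, 3), dtype='float32'))

# Binary keep-mask, rescaled so that E[x * mask] == x.
mask = K.random_binomial((1, 4, 4, 3), p=keep_p, dtype='float32') / keep_p
print(K.eval(K.mean(x * mask)))  # ~1.0 in expectation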
Example #5
Source File: fractalnet.py From keras-fractalnet with MIT License
def _random_arr(self, count, p):
    return K.random_binomial((count,), p=p)
Example #6
Source File: fractalnet.py From keras-fractalnet with MIT License
def _build_global_switch(self):
    # A randomly sampled tensor that will signal if the batch
    # should use global or local droppath
    return K.equal(K.random_binomial((), p=self.global_p, seed=self.switch_seed), 1.)
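Examples #5 and #6 come from the same drop-path implementation: _random_arr draws one keep/drop gate per fractal column, while _build_global_switch draws a single scalar that decides whether the whole batch uses global or local drop-path. A hypothetical use of both patterns (the probabilities here are illustrative, not the repo's defaults):

from keras import backend as K

gates = K.random_binomial((4,), p=0.85)                         # per-column 0/1 keep gates
use_global = K.equal(K.random_binomial((), p=0.5, seed=0), 1.)  # scalar boolean switch
print(K.eval(gates), K.eval(use_global))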
Example #7
Source File: embeddings.py From ikelos with MIT License
def call(self, x, mask=None):
    if isinstance(x, list):
        x, _ = x
    if mask is not None and isinstance(mask, list):
        mask, _ = mask
    if 0. < self.dropout < 1.:
        retain_p = 1. - self.dropout
        dims = self.W._keras_shape[:-1]
        B = K.random_binomial(dims, p=retain_p) * (1. / retain_p)
        B = K.expand_dims(B)
        W = K.in_train_phase(self.W * B, self.W)
    else:
        W = self.W

    if self.mode == 'matrix':
        return K.gather(W, x)
    elif self.mode == 'tensor':
        # quick and dirty: only allowing for 3dim inputs when it's tensor mode
        assert K.ndim(x) == 3
        # put sequence on first; gather; take diagonal across shared batch dimension
        # in other words, W is (B, S, F)
        # incoming x is (B, S, A)
        inds = K.arange(self.W._keras_shape[0])
        #out = K.gather(K.permute_dimensions(W, (1,0,2)), x).diagonal(axis1=0, axis2=3)
        #return K.permute_dimensions(out, (3,0,1,2))
        ### method above doesn't do grads =.=
        # tensor abc goes to bac, indexed onto with xyz, goes to xyzac,
        # x == a, so shape to xayzc == xxyzc
        # take diagonal on first two: xyzc
        #out = K.colgather()
        out = K.gather(K.permute_dimensions(W, (1,0,2)), x)
        out = K.permute_dimensions(out, (0,3,1,2,4))
        out = K.gather(out, (inds, inds))
        return out
    else:
        raise Exception('sanity check. should not be here.')

    #all_dims = T.arange(len(self.W._keras_shape))
    #first_shuffle = [all_dims[self.embed_dim]] + all_dims[:self.embed_dim] + all_dims[self.embed_dim+1:]
    ## 1. take diagonal from 0th to
    ## change of tactics
    ## embed on time or embed on batch. that's all I'm supporting.
    ## if it's embed on time, then, x.ndim+1 is where batch will be, and is what
    ## i need to take the diagonal over.
    ## now dim shuffle the xdims + 1 to the front.
    #todo: get second shuffle or maybe find diagonal calculations
    #out = K.gather(W, x)
    #return out

    ### reference
    #A = S(np.arange(60).reshape(3,4,5))
    #x = S(np.random.randint(0, 4, (3,4,10)))
    #x_emb = A.dimshuffle(1,0,2)[x].dimshuffle(0,3,1,2,4)[T.arange(A.shape[0]), T.arange(A.shape[0])]
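The dropout block at the top of Example #7 zeroes whole embedding rows: one Bernoulli gate per row of W, broadcast across the embedding dimension, with survivors rescaled by 1/retain_p. A standalone sketch of just that trick (the vocab size 5, dimension 8, and rate 0.2 are placeholder assumptions):

import numpy as np
from keras import backend as K

W = K.constant(np.random.randn(5, 8).astype('float32'))  # (vocab, dim)
retain_p = 1.0 - 0.2

B = K.random_binomial((5,), p=retain_p) * (1.0 / retain_p)  # one gate per row
W_dropped = W * K.expand_dims(B)  # (5, 1) mask broadcasts over the dim axis
print(K.eval(W_dropped))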
Example #8
Source File: models.py From DeepIV with MIT License
def _get_sampler_by_string(self, loss):
    output = self.outputs[0]
    inputs = self.inputs

    if loss in ["MSE", "mse", "mean_squared_error"]:
        output += samplers.random_normal(K.shape(output), mean=0.0, std=1.0)
        draw_sample = K.function(inputs + [K.learning_phase()], [output])

        def sample_gaussian(inputs, use_dropout=False):
            '''
            Helper to draw samples from a gaussian distribution
            '''
            return draw_sample(inputs + [int(use_dropout)])[0]

        return sample_gaussian

    elif loss == "binary_crossentropy":
        output = K.random_binomial(K.shape(output), p=output)
        draw_sample = K.function(inputs + [K.learning_phase()], [output])

        def sample_binomial(inputs, use_dropout=False):
            '''
            Helper to draw samples from a binomial distribution
            '''
            return draw_sample(inputs + [int(use_dropout)])[0]

        return sample_binomial

    elif loss in ["mean_absolute_error", "mae", "MAE"]:
        output += samplers.random_laplace(K.shape(output), mu=0.0, b=1.0)
        draw_sample = K.function(inputs + [K.learning_phase()], [output])

        def sample_laplace(inputs, use_dropout=False):
            '''
            Helper to draw samples from a Laplacian distribution
            '''
            return draw_sample(inputs + [int(use_dropout)])[0]

        return sample_laplace

    elif loss == "mixture_of_gaussians":
        pi, mu, log_sig = densities.split_mixture_of_gaussians(output, self.n_components)
        samples = samplers.random_gmm(pi, mu, K.exp(log_sig))
        draw_sample = K.function(inputs + [K.learning_phase()], [samples])
        return lambda inputs, use_dropout: draw_sample(inputs + [int(use_dropout)])[0]

    else:
        raise NotImplementedError("Unrecognised loss: %s. "
                                  "Cannot build a generic sampler" % loss)
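A sketch of the binary_crossentropy branch above, outside the class: take a model whose output is a probability, replace the output with Bernoulli draws via K.random_binomial(K.shape(output), p=output), and compile a backend function that also accepts the learning-phase flag. The toy one-layer model is an assumption for illustration only.

import numpy as np
from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model

inp = Input(shape=(3,))
prob = Dense(1, activation='sigmoid')(inp)  # model outputs p(y=1|x)
model = Model(inp, prob)

sampled = K.random_binomial(K.shape(model.output), p=model.output)
draw_sample = K.function(model.inputs + [K.learning_phase()], [sampled])

x = np.random.rand(4, 3).astype('float32')
print(draw_sample([x, 0])[0])  # one 0/1 draw per predicted probability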