Python tensorflow.compat.v1.constant_initializer() Examples
The following are 30 code examples of tensorflow.compat.v1.constant_initializer(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1, or try the search function.
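Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the variable names are illustrative only) of how tf.compat.v1.constant_initializer() is typically paired with tf.compat.v1.get_variable():

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # the initializer is used with v1-style variables and sessions

with tf.variable_scope("demo"):
    # Every element of `bias` starts at 0.1; `scale` starts as all ones.
    bias = tf.get_variable("bias", shape=[4], initializer=tf.constant_initializer(0.1))
    scale = tf.get_variable("scale", shape=[4], initializer=tf.constant_initializer(1.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(bias))   # [0.1 0.1 0.1 0.1]
    print(sess.run(scale))  # [1. 1. 1. 1.]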
Example #1
Source File: run_recurrent_model_boolq.py From language with Apache License 2.0 | 6 votes |
def compute_attention(t1, t2):
  """Build an attention matrix between 3-tensors `t1` and `t2`.

  Args:
    t1: <tf.float32>[batch, seq_len1, dim1]
    t2: <tf.float32>[batch, seq_len2, dim2]

  Returns:
    the similarity scores <tf.float32>[batch, seq_len1, seq_len2]
  """
  dim = t1.shape.as_list()[2]
  init = tf.constant_initializer(1.0 / dim)
  t1_logits = ops.last_dim_weighted_sum(t1, "t1_w")
  t2_logits = ops.last_dim_weighted_sum(t2, "t2_w")
  dot_w = tf.get_variable(
      "dot_w", shape=dim, initializer=init, dtype=tf.float32)
  # Compute x * dot_weights first, then batch mult with x
  dots = t1 * tf.expand_dims(tf.expand_dims(dot_w, 0), 0)
  dot_logits = tf.matmul(dots, t2, transpose_b=True)
  return dot_logits + \
      tf.expand_dims(t1_logits, 2) + \
      tf.expand_dims(t2_logits, 1)
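Reading the snippet above, and assuming ops.last_dim_weighted_sum(t, name) is a learned weighted sum over the last dimension of t, the returned score for batch b and positions i, j is the trilinear form

    score[b, i, j] = sum_d w_d * t1[b, i, d] * t2[b, j, d] + u . t1[b, i] + v . t2[b, j]

where w is dot_w (every entry initialized to 1/dim by the constant initializer) and u, v are the weights inside the two last_dim_weighted_sum calls.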
Example #2
Source File: transformer_memory.py From tensor2tensor with Apache License 2.0 | 6 votes |
def _address_content(self, x):
  """Address the memory based on content similarity.

  Args:
    x: a tensor in the shape of [batch_size, length, depth].

  Returns:
    the logits for each memory entry [batch_size, length, memory_size].
  """
  mem_keys = tf.layers.dense(self.mem_vals, self.key_depth,
                             bias_initializer=tf.constant_initializer(1.0),
                             name="mem_key")
  mem_query = tf.layers.dense(x, self.key_depth,
                              bias_initializer=tf.constant_initializer(1.0),
                              name="mem_query")
  norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys),
                   transpose_b=True)
  dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True)
  cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist")
  access_logits = self.sharpen_factor * cos_dist
  return access_logits
Example #3
Source File: discretization.py From tensor2tensor with Apache License 2.0 | 6 votes |
def get_vq_codebook(codebook_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    means = tf.get_variable(
        name="means",
        shape=[codebook_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[codebook_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          name="ema_means",
          initializer=means.initialized_value(),
          trainable=False)
  return means, ema_means, ema_count
Example #4
Source File: bounds_test.py From interval-bound-propagation with Apache License 2.0 | 6 votes |
def testBatchNormIntervalBounds(self):
  z = tf.constant([[1, 2, 3]], dtype=tf.float32)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  g = tf.reshape(tf.range(-1, 2, dtype=tf.float32), [1, 3])
  b = tf.reshape(tf.range(3, dtype=tf.float32), [1, 3])
  batch_norm = ibp.BatchNorm(scale=True, offset=True, eps=0., initializers={
      'gamma': lambda *args, **kwargs: g,
      'beta': lambda *args, **kwargs: b,
      'moving_mean': tf.constant_initializer(1.),
      'moving_variance': tf.constant_initializer(4.),
  })
  batch_norm(z, is_training=False)
  batch_norm = ibp.BatchNormWrapper(batch_norm)
  # Test propagation.
  output_bounds = batch_norm.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    self.assertAlmostEqual([[-.5, 1., 2.5]], l.tolist())
    self.assertAlmostEqual([[.5, 1., 3.5]], u.tolist())
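The expected bounds in this test can be reproduced by hand: with eps=0, the frozen batch norm computes gamma * (x - moving_mean) / sqrt(moving_variance) + beta per channel, and interval arithmetic sends the lower input endpoint through positive gammas and the upper endpoint through negative ones. A small standalone numpy sketch (not part of the test file) that reproduces the asserted values:

import numpy as np

g = np.array([-1., 0., 1.])           # gamma per channel
b = np.array([0., 1., 2.])            # beta per channel
mean, std = 1., np.sqrt(4.)           # moving_mean, sqrt(moving_variance)
lo, hi = np.array([0., 1., 2.]), np.array([2., 3., 4.])  # z - 1, z + 1

out_lo = np.where(g >= 0, g * (lo - mean), g * (hi - mean)) / std + b
out_hi = np.where(g >= 0, g * (hi - mean), g * (lo - mean)) / std + b
print(out_lo)  # [-0.5  1.   2.5]
print(out_hi)  # [ 0.5  1.   3.5]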
Example #5
Source File: bounds_test.py From interval-bound-propagation with Apache License 2.0 | 6 votes |
def testConv1dIntervalBounds(self):
  m = snt.Conv1D(
      output_channels=1,
      kernel_shape=2,
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(2.),
      })
  z = tf.constant([3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv1dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  output_bounds = m.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(7., l)
    self.assertAlmostEqual(11., u)
Example #6
Source File: util.py From nni with MIT License | 6 votes |
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    """lstm cell"""
    _, nin = [v.value for v in xs[0].get_shape()]  # the first is nbatch
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
Example #7
Source File: bounds_test.py From interval-bound-propagation with Apache License 2.0 | 6 votes |
def testFCIntervalBounds(self):
  m = snt.Linear(1, initializers={
      'w': tf.constant_initializer(1.),
      'b': tf.constant_initializer(2.),
  })
  z = tf.constant([[1, 2, 3]], dtype=tf.float32)
  m(z)  # Connect to create weights.
  m = ibp.LinearFCWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  output_bounds = m.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(5., l)
    self.assertAlmostEqual(11., u)
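A quick sanity check on the asserted values: with every weight initialized to 1 and the bias to 2, the single output is x1 + x2 + x3 + 2, so the lower bound is 0 + 1 + 2 + 2 = 5 and the upper bound is 2 + 3 + 4 + 2 = 11, exactly what the test expects.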
Example #8
Source File: maml_inner_loop_test.py From tensor2robot with Apache License 2.0 | 6 votes |
def learned_model_train_fn(features,
                           labels,
                           inference_outputs,
                           mode=None,
                           config=None,
                           params=None):
  """A model_train_fn where the loss function itself is learned."""
  del features, labels, mode, config, params
  with tf.variable_scope('learned_loss', reuse=tf.AUTO_REUSE):
    learned_label = tf.get_variable(
        'learned_label',
        shape=(1,),
        dtype=tf.float32,
        initializer=tf.constant_initializer([1.0], dtype=tf.float32))
  return tf.losses.mean_squared_error(
      labels=learned_label, predictions=inference_outputs['prediction'])
Example #9
Source File: model_utils.py From language with Apache License 2.0 | 6 votes |
def layer_norm(layer_inputs, hidden_size):
  """Implements layer norm from [Ba et al. 2016] Layer Normalization.

  See eqn. 4 in (https://arxiv.org/pdf/1607.06450.pdf).

  Args:
    layer_inputs (tensor): The inputs to the layer.
      shape <float32>[batch_size, hidden_size]
    hidden_size (int): Dimensionality of the hidden layer.

  Returns:
    normalized (tensor): layer_inputs, normalized over all the hidden units
      in the layer.
      shape <float32>[batch_size, hidden_size]
  """
  mean, var = tf.nn.moments(layer_inputs, [1], keep_dims=True)
  with tf.variable_scope("layernorm", reuse=tf.AUTO_REUSE):
    gain = tf.get_variable(
        "gain", shape=[hidden_size], initializer=tf.constant_initializer(1))
    bias = tf.get_variable(
        "bias", shape=[hidden_size], initializer=tf.constant_initializer(0))

  normalized = gain * (layer_inputs - mean) / tf.sqrt(var + _EPSILON) + bias
  return normalized
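In the notation of Ba et al., the function above computes, with the mean mu and variance sigma^2 taken over the hidden dimension and _EPSILON a small constant defined elsewhere in the file:

    layer_norm(h) = gain * (h - mu) / sqrt(sigma^2 + epsilon) + bias

where "gain" and "bias" are the learned per-unit parameters, started at 1 and 0 respectively by the two constant initializers.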
Example #10
Source File: model_utils.py From language with Apache License 2.0 | 6 votes |
def layer_norm(layer_inputs, hidden_size):
  """Implements layer norm from [Ba et al. 2016] Layer Normalization.

  See eqn. 4 in (https://arxiv.org/pdf/1607.06450.pdf).

  Args:
    layer_inputs (tensor): The inputs to the layer.
      shape <float32>[batch_size, hidden_size]
    hidden_size (int): Dimensionality of the hidden layer.

  Returns:
    normalized (tensor): layer_inputs, normalized over all the hidden units
      in the layer.
      shape <float32>[batch_size, hidden_size]
  """
  mean, var = tf.nn.moments(layer_inputs, [1], keep_dims=True)
  with tf.variable_scope("layernorm", reuse=tf.AUTO_REUSE):
    gain = tf.get_variable(
        "gain", shape=[hidden_size], initializer=tf.constant_initializer(1))
    bias = tf.get_variable(
        "bias", shape=[hidden_size], initializer=tf.constant_initializer(0))

  normalized = gain * (layer_inputs - mean) / tf.sqrt(var + _EPSILON) + bias
  return normalized
Example #11
Source File: rnn.py From magenta with Apache License 2.0 | 6 votes |
def build(self, inputs_shape):
  if not inputs_shape[1]:
    raise ValueError(
        "Expecting inputs_shape[1] to be set: %s" % str(inputs_shape))
  input_size = int(inputs_shape[1])
  self._kernel = self.add_variable(
      self._names["W"], [input_size + self._num_units, self._num_units * 4])
  self._bias = self.add_variable(
      self._names["b"], [self._num_units * 4],
      initializer=tf.constant_initializer(0.0))
  if self._use_peephole:
    self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units])
    self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units])
    self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units])

  self.built = True
Example #12
Source File: rnn_test.py From magenta with Apache License 2.0 | 6 votes |
def testInputProjectionWrapper(self):
  with self.cached_session() as sess:
    with tf.variable_scope(
        "root", initializer=tf.constant_initializer(0.5)):
      x = tf.zeros([1, 2])
      m = tf.zeros([1, 3])
      cell = contrib_rnn.InputProjectionWrapper(
          rnn_cell.GRUCell(3), num_proj=3)
      g, new_m = cell(x, m)
      sess.run([tf.global_variables_initializer()])
      res = sess.run([g, new_m], {
          x.name: np.array([[1., 1.]]),
          m.name: np.array([[0.1, 0.1, 0.1]])
      })
      self.assertEqual(res[1].shape, (1, 3))
      # The numbers in results were not calculated, this is just a smoke test.
      self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
Example #13
Source File: transformer_nat.py From tensor2tensor with Apache License 2.0 | 6 votes |
def init_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  means = tf.get_variable(
      name="means",
      shape=[bottleneck_size, hidden_size],
      initializer=tf.uniform_unit_scaling_initializer())
  ema_count = tf.get_variable(
      name="ema_count",
      shape=[bottleneck_size],
      initializer=tf.constant_initializer(0),
      trainable=False)
  with tf.colocate_with(means):
    ema_means = tf.get_variable(
        name="ema_means",
        initializer=means.initialized_value(),
        trainable=False)
  return means, ema_means, ema_count
Example #14
Source File: optimization_test.py From albert with Apache License 2.0 | 6 votes |
def test_adam(self):
  with self.test_session() as sess:
    w = tf.get_variable(
        "w",
        shape=[3],
        initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
    x = tf.constant([0.4, 0.2, -0.5])
    loss = tf.reduce_mean(tf.square(x - w))
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    global_step = tf.train.get_or_create_global_step()
    optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
    train_op = optimizer.apply_gradients(list(zip(grads, tvars)), global_step)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    for _ in range(100):
      sess.run(train_op)
    w_np = sess.run(w)
    self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
Example #15
Source File: cifarnet.py From models with Apache License 2.0 | 6 votes |
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc
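A hypothetical usage sketch (not from the original file, and assuming the companion cifarnet() network function defined alongside this scope in slim's nets/cifarnet.py): the returned scope is entered with slim.arg_scope so the initializers and regularizer become the defaults for every conv2d and fully_connected layer created inside it.

with slim.arg_scope(cifarnet_arg_scope(weight_decay=0.004)):
    # `images` is assumed to be a [batch, 32, 32, 3] float tensor.
    logits, end_points = cifarnet(images, num_classes=10, is_training=True)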
Example #16
Source File: model.py From super-resolution-videos with The Unlicense | 5 votes |
def SRGAN_g(t_image, is_train=False, reuse=False):
    """ Generator in Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
    feature maps (n) and stride (s) feature maps (n) and stride (s)
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = None  # tf.constant_initializer(value=0.0)
    g_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope("SRGAN_g", reuse=reuse) as vs:
        tl.layers.set_name_reuse(reuse)
        n = InputLayer(t_image, name='in')
        n = Conv2d(n, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, name='n64s1/c')
        temp = n

        # B residual blocks
        for i in range(16):
            nn = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c1/%s' % i)
            nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='n64s1/b1/%s' % i)
            nn = Conv2d(nn, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c2/%s' % i)
            nn = BatchNormLayer(nn, is_train=is_train, gamma_init=g_init, name='n64s1/b2/%s' % i)
            nn = ElementwiseLayer([n, nn], tf.add, 'b_residual_add/%s' % i)
            n = nn

        n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c/m')
        n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='n64s1/b/m')
        n = ElementwiseLayer([n, temp], tf.add, 'add3')
        # B residual blacks end

        n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/1')
        n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/1')

        n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/2')
        n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/2')

        n = Conv2d(n, 3, (1, 1), (1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init, name='out')
        return n
Example #17
Source File: utils.py From models with Apache License 2.0 | 5 votes |
def _quant_var(
    name,
    initializer_val,
    vars_collection=tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
):
  """Create an var for storing the min/max quantization range."""
  return contrib_framework.model_variable(
      name,
      shape=[],
      initializer=tf.constant_initializer(initializer_val),
      collections=[vars_collection],
      trainable=False)
Example #18
Source File: transformer.py From mesh with Apache License 2.0 | 5 votes |
def sublayer_rezero(x, layer_stack, context, initial_value=0.0):
  """Multiply by zero-initialized scalar (residual not included)."""
  del layer_stack
  rezero_weight = mtf.get_variable(
      x.mesh, "rezero_weight", shape=context.model.ensemble_dims,
      dtype=context.variable_dtype,
      initializer=tf.constant_initializer(initial_value))
  return x * rezero_weight
Example #19
Source File: alexnet.py From models with Apache License 2.0 | 5 votes |
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      biases_initializer=tf.constant_initializer(0.1),
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
Example #20
Source File: model.py From gpt2-estimator with MIT License | 5 votes |
def norm(x, scope, *, axis=-1, epsilon=1e-5):
    """Normalize to mean = 0, std = 1, then do a diagonal affine transform."""
    with tf.variable_scope(scope):
        n_state = x.shape[-1]
        g = tf.get_variable(
            'g', [n_state], initializer=tf.constant_initializer(1))
        b = tf.get_variable(
            'b', [n_state], initializer=tf.constant_initializer(0))
        u = tf.reduce_mean(x, axis=axis, keepdims=True)
        s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)
        x = (x - u) * tf.rsqrt(s + epsilon)
        x = x*g + b
        return x
Example #21
Source File: sar.py From top-k-rec with GNU General Public License v3.0 | 5 votes |
def build_graph(self) -> 'List[tf.placeholder[tf.int32]]':
    with tf.variable_scope('bpr', reuse=tf.AUTO_REUSE):
        u = tf.placeholder(tf.int32, [None])
        i = tf.placeholder(tf.int32, [None])
        j = tf.placeholder(tf.int32, [None])
        self.__ue = tf.get_variable(name="user_embed", shape=[self.n_users, self.k],
                                    dtype=tf.float32,
                                    initializer=tf.random_normal_initializer(0, 0.01))
        self.__ie = tf.get_variable(name="item_embed", shape=[self.n_items, self.k],
                                    dtype=tf.float32,
                                    initializer=tf.random_normal_initializer(0, 0.01))
        self.__ib = tf.get_variable(name="item_bias", shape=[self.n_items],
                                    dtype=tf.float32,
                                    initializer=tf.constant_initializer(0.0))
        ueb = tf.nn.embedding_lookup(self.__ue, u)
        ieb = tf.nn.embedding_lookup(self.__ie, i)
        jeb = tf.nn.embedding_lookup(self.__ie, j)
        ib = tf.nn.embedding_lookup(self.__ib, i)
        jb = tf.nn.embedding_lookup(self.__ib, j)
        x_ui = tf.reduce_sum(tf.multiply(ueb, ieb), 1)
        x_uj = tf.reduce_sum(tf.multiply(ueb, jeb), 1)
        x_uij = ib - jb + x_ui - x_uj
        with tf.name_scope('output'):
            self.pred = tf.matmul(ueb, tf.transpose(ieb)) + ib
            if self.mode == 'l2':
                self.obj = tf.reduce_sum(tf.log(1 + tf.exp(-x_uij))) + \
                           0.5 * tf.reduce_sum(ueb ** 2 * self.lu + ieb ** 2 * self.li + jeb ** 2 * self.lj) + \
                           0.5 * tf.reduce_sum(ib ** 2 + jb ** 2) * self.lb
            else:
                self.obj = tf.reduce_sum(tf.log(1 + tf.exp(-x_uij))) + \
                           tf.reduce_sum(tf.abs(ueb) * self.lu + tf.abs(ieb) * self.li + tf.abs(jeb) * self.lj) + \
                           tf.reduce_sum(tf.abs(ib) + tf.abs(jb)) * self.lb
        self.solver = tf.train.RMSPropOptimizer(self.lr).minimize(self.obj)
    return u, i, j
Example #22
Source File: bpr.py From top-k-rec with GNU General Public License v3.0 | 5 votes |
def build_graph(self) -> 'List[tf.placeholder[tf.int32]]':
    with tf.variable_scope('bpr', reuse=tf.AUTO_REUSE):
        u = tf.placeholder(tf.int32, [None])
        i = tf.placeholder(tf.int32, [None])
        j = tf.placeholder(tf.int32, [None])
        self.__ue = tf.get_variable(name="user_embed", shape=[self.n_users, self.k],
                                    dtype=tf.float32,
                                    initializer=tf.random_normal_initializer(0, 0.01))
        self.__ie = tf.get_variable(name="item_embed", shape=[self.n_items, self.k],
                                    dtype=tf.float32,
                                    initializer=tf.random_normal_initializer(0, 0.01))
        self.__ib = tf.get_variable(name="item_bias", shape=[self.n_items],
                                    dtype=tf.float32,
                                    initializer=tf.constant_initializer(0.0))
        ueb = tf.nn.embedding_lookup(self.__ue, u)
        ieb = tf.nn.embedding_lookup(self.__ie, i)
        jeb = tf.nn.embedding_lookup(self.__ie, j)
        ib = tf.nn.embedding_lookup(self.__ib, i)
        jb = tf.nn.embedding_lookup(self.__ib, j)
        x_ui = tf.reduce_sum(tf.multiply(ueb, ieb), 1)
        x_uj = tf.reduce_sum(tf.multiply(ueb, jeb), 1)
        x_uij = ib - jb + x_ui - x_uj
        with tf.name_scope('output'):
            self.pred = tf.matmul(ueb, tf.transpose(ieb)) + ib
            if self.mode == 'l2':
                self.obj = tf.reduce_sum(tf.log(1+tf.exp(-x_uij)))+\
                           0.5 * tf.reduce_sum(ueb**2*self.lu+ieb**2*self.li+jeb**2*self.lj)+\
                           0.5 * tf.reduce_sum(ib**2+jb**2)*self.lb
            else:
                self.obj = tf.reduce_sum(tf.log(1+tf.exp(-x_uij)))+\
                           tf.reduce_sum(tf.abs(ueb)*self.lu+tf.abs(ieb)*self.li+tf.abs(jeb)*self.lj)+\
                           tf.reduce_sum(tf.abs(ib)+tf.abs(jb))*self.lb
        self.solver = tf.train.RMSPropOptimizer(self.lr).minimize(self.obj)
    return u, i, j
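For readers less familiar with BPR, the first term of self.obj in the two examples above is the standard BPR loss written in softplus form: since -log(sigmoid(x)) = log(1 + exp(-x)), the objective being minimized is

    L = sum over (u, i, j) of log(1 + exp(-x_uij)) + regularization
      = - sum over (u, i, j) of log sigmoid(x_uij) + regularization

where x_uij = b_i - b_j + <p_u, q_i> - <p_u, q_j> is exactly the x_uij tensor computed in the code, the item biases b start at zero via constant_initializer(0.0), and the regularizer is either L2 (mode == 'l2') or L1 on the embeddings and biases.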
Example #23
Source File: mdn.py From tensor2robot with Apache License 2.0 | 5 votes |
def predict_mdn_params(inputs,
                       num_alphas,
                       sample_size,
                       condition_sigmas=False):
  """Outputs parameters of a mixture density network given inputs.

  Args:
    inputs: A tensor input to compute the MDN parameters from.
    num_alphas: The number of mixture components.
    sample_size: Scalar, the size of a single distribution sample.
    condition_sigmas: If True, the sigma params are conditioned on `inputs`.
      Otherwise they are simply learned variables.

  Returns:
    dist_params: A tensor of shape
      [..., num_alphas + 2 * num_alphas * sample_size]
    aux_output: auxiliary output of shape [..., aux_output_dim] if
      aux_output_dim is > 0.
  """
  num_mus = num_alphas * sample_size
  # Assume isotropic gaussian components.
  num_sigmas = num_alphas * sample_size
  num_fc_outputs = num_alphas + num_mus
  if condition_sigmas:
    num_fc_outputs = num_fc_outputs + num_sigmas
  dist_params = slim.fully_connected(
      inputs, num_fc_outputs, activation_fn=None, scope='mdn_params')
  if not condition_sigmas:
    # Sigmas initialized so that softplus(sigmas) = 1.
    sigmas = tf.get_variable(
        'mdn_stddev_inputs',
        shape=[num_sigmas],
        dtype=tf.float32,
        initializer=tf.constant_initializer(np.log(np.e - 1)))
    tiled_sigmas = tf.tile(
        sigmas[None], tf.stack([tf.shape(dist_params)[0], 1]))
    dist_params = tf.concat([dist_params, tiled_sigmas], axis=-1)
  return dist_params
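The comment "Sigmas initialized so that softplus(sigmas) = 1" can be verified by hand: softplus(x) = log(1 + exp(x)), so with x = log(e - 1) we get log(1 + (e - 1)) = log(e) = 1. A one-line standalone check (plain numpy, independent of the snippet above):

import numpy as np
x = np.log(np.e - 1)
print(np.log1p(np.exp(x)))  # 1.0 -- softplus of the chosen initial value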
Example #24
Source File: loss_test.py From interval-bound-propagation with Apache License 2.0 | 5 votes |
def _build(self, z0, is_training=False):
  self._m = snt.Linear(2, initializers={
      'w': tf.constant_initializer(1.),
      'b': lambda *unsed_args, **unused_kwargs: tf.constant([0., 1.]),
  })
  return self._m(z0)
Example #25
Source File: attacks_test.py From interval-bound-propagation with Apache License 2.0 | 5 votes |
def testEndToEnd(self, predictor_cls, attack_cls, optimizer_cls, epsilon,
                 restarted=False):
  # l-\infty norm of perturbation ball.
  if isinstance(epsilon, list):
    # We test the ability to have different epsilons across dimensions.
    epsilon = tf.constant([epsilon], dtype=tf.float32)
  bounds = (-.5, 2.5)
  # Create a simple network.
  m = snt.Linear(1, initializers={
      'w': tf.constant_initializer(1.),
      'b': tf.constant_initializer(1.),
  })
  z = tf.constant([[1, 2]], dtype=tf.float32)
  predictor = predictor_cls(m, self)
  # Not important for the test but needed.
  labels = tf.constant([1], dtype=tf.int64)

  # We create two attacks to maximize and then minimize the output.
  max_spec = ibp.LinearSpecification(tf.constant([[[1.]]]))
  max_attack = attack_cls(predictor, max_spec, epsilon,
                          input_bounds=bounds,
                          optimizer_builder=optimizer_cls)
  if restarted:
    max_attack = ibp.RestartedAttack(max_attack, num_restarts=10)
  z_max = max_attack(z, labels)
  min_spec = ibp.LinearSpecification(tf.constant([[[-1.]]]))
  min_attack = attack_cls(predictor, min_spec, epsilon,
                          input_bounds=bounds,
                          optimizer_builder=optimizer_cls)
  if restarted:
    min_attack = ibp.RestartedAttack(min_attack, num_restarts=10)
  z_min = min_attack(z, labels)

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    z_max_values, z_min_values = sess.run([z_max, z_min])
    z_max_values = z_max_values[0]
    z_min_values = z_min_values[0]
    self.assertAlmostEqual(2., z_max_values[0])
    self.assertAlmostEqual(2.5, z_max_values[1])
    self.assertAlmostEqual(0., z_min_values[0])
    self.assertAlmostEqual(1., z_min_values[1])
Example #26
Source File: bounds_test.py From interval-bound-propagation with Apache License 2.0 | 5 votes |
def testCaching(self):
  m = snt.Linear(1, initializers={
      'w': tf.constant_initializer(1.),
      'b': tf.constant_initializer(2.),
  })
  z = tf.placeholder(shape=(1, 3), dtype=tf.float32)
  m(z)  # Connect to create weights.
  m = ibp.LinearFCWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  output_bounds = m.propagate_bounds(input_bounds)

  input_bounds.enable_caching()
  output_bounds.enable_caching()
  update_all_caches_op = tf.group([input_bounds.update_cache_op,
                                   output_bounds.update_cache_op])

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    # Initialise the caches based on the model inputs.
    sess.run(update_all_caches_op, feed_dict={z: [[1., 2., 3.]]})

    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(5., l)
    self.assertAlmostEqual(11., u)

    # Update the cache based on a different set of inputs.
    sess.run([output_bounds.update_cache_op], feed_dict={z: [[2., 3., 7.]]})
    # We only updated the output bounds' cache.
    # This asserts that the computation depends on the underlying
    # input bounds tensor, not on cached version of it.
    # (Thus it doesn't matter what order the caches are updated.)
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(11., l)
    self.assertAlmostEqual(17., u)
Example #27
Source File: fastlin_test.py From interval-bound-propagation with Apache License 2.0 | 5 votes |
def testConv1dSymbolicBounds(self):
  m = snt.Conv1D(
      output_channels=1,
      kernel_shape=(2),
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(3.),
      })
  z = tf.constant([3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv1dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  input_bounds = ibp.SymbolicBounds.convert(input_bounds)
  output_bounds = m.propagate_bounds(input_bounds)
  output_bounds = ibp.IntervalBounds.convert(output_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(8., l)
    self.assertAlmostEqual(12., u)
Example #28
Source File: fastlin_test.py From interval-bound-propagation with Apache License 2.0 | 5 votes |
def testConv2dSymbolicBounds(self):
  m = snt.Conv2D(
      output_channels=1,
      kernel_shape=(2, 2),
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(2.),
      })
  z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv2dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  input_bounds = ibp.SymbolicBounds.convert(input_bounds)
  output_bounds = m.propagate_bounds(input_bounds)
  output_bounds = ibp.IntervalBounds.convert(output_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(8., l)
    self.assertAlmostEqual(16., u)
Example #29
Source File: fastlin_test.py From interval-bound-propagation with Apache License 2.0 | 5 votes |
def testFCSymbolicBounds(self):
  m = snt.Linear(1, initializers={
      'w': tf.constant_initializer(1.),
      'b': tf.constant_initializer(2.),
  })
  z = tf.constant([[1, 2, 3]], dtype=tf.float32)
  m(z)  # Connect to create weights.
  m = ibp.LinearFCWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  input_bounds = ibp.SymbolicBounds.convert(input_bounds)
  output_bounds = m.propagate_bounds(input_bounds)
  concrete_bounds = ibp.IntervalBounds.convert(output_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u, cl, cu = sess.run([output_bounds.lower, output_bounds.upper,
                             concrete_bounds.lower, concrete_bounds.upper])
    self.assertTrue(np.all(l.w == 1.))
    self.assertTrue(np.all(l.b == 2.))
    self.assertAlmostEqual([[0, 1, 2]], l.lower.tolist())
    self.assertAlmostEqual([[2, 3, 4]], l.upper.tolist())
    self.assertTrue(np.all(u.w == 1.))
    self.assertTrue(np.all(u.b == 2.))
    self.assertAlmostEqual([[0, 1, 2]], u.lower.tolist())
    self.assertAlmostEqual([[2, 3, 4]], u.upper.tolist())
    cl = cl.item()
    cu = cu.item()
    self.assertAlmostEqual(5., cl)
    self.assertAlmostEqual(11., cu)
Example #30
Source File: rnn_test.py From magenta with Apache License 2.0 | 5 votes |
def testBasicLSTMCellWithStateTupleLayerNorm(self):
  """The results of LSTMCell and LayerNormBasicLSTMCell should be the same."""
  with self.cached_session() as sess:
    with tf.variable_scope(
        "root", initializer=tf.constant_initializer(0.5)):
      x = tf.zeros([1, 2])
      c0 = tf.zeros([1, 2])
      h0 = tf.zeros([1, 2])
      state0 = rnn_cell.LSTMStateTuple(c0, h0)
      c1 = tf.zeros([1, 2])
      h1 = tf.zeros([1, 2])
      state1 = rnn_cell.LSTMStateTuple(c1, h1)
      cell = rnn_cell.MultiRNNCell([
          contrib_rnn.LayerNormBasicLSTMCell(
              2, layer_norm=True, norm_gain=1.0, norm_shift=0.0)
          for _ in range(2)
      ])
      h, (s0, s1) = cell(x, (state0, state1))
      sess.run([tf.global_variables_initializer()])
      res = sess.run(
          [h, s0, s1], {
              x.name: np.array([[1., 1.]]),
              c0.name: 0.1 * np.asarray([[0, 1]]),
              h0.name: 0.1 * np.asarray([[2, 3]]),
              c1.name: 0.1 * np.asarray([[4, 5]]),
              h1.name: 0.1 * np.asarray([[6, 7]]),
          })

      expected_h = np.array([[-0.38079708, 0.38079708]])
      expected_h0 = np.array([[-0.38079708, 0.38079708]])
      expected_c0 = np.array([[-1.0, 1.0]])
      expected_h1 = np.array([[-0.38079708, 0.38079708]])
      expected_c1 = np.array([[-1.0, 1.0]])

      self.assertEqual(len(res), 3)
      self.assertAllClose(res[0], expected_h, 1e-5)
      self.assertAllClose(res[1].c, expected_c0, 1e-5)
      self.assertAllClose(res[1].h, expected_h0, 1e-5)
      self.assertAllClose(res[2].c, expected_c1, 1e-5)
      self.assertAllClose(res[2].h, expected_h1, 1e-5)