Python tensorflow.compat.v1.set_random_seed() Examples
The following are 30 code examples of tensorflow.compat.v1.set_random_seed(), drawn from open-source projects.
You may also want to check out all available functions/classes of the module tensorflow.compat.v1. Each example below notes its source file, originating project, and license.
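Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the helper name sample_with_seed is illustrative) of what tf.compat.v1.set_random_seed() does: it fixes the graph-level seed, so stateful random ops in an identically constructed graph reproduce the same values across runs.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def sample_with_seed(seed):
    # Build a fresh graph, fix its graph-level seed, and draw one batch
    # from a stateful random op. The op-level seed is left unset, so it
    # is derived deterministically from the graph-level seed.
    with tf.Graph().as_default():
        tf.set_random_seed(seed)
        x = tf.random_uniform([3])
        with tf.Session() as sess:
            return sess.run(x)

# Identical graph structure + identical graph-level seed => identical draws.
assert np.allclose(sample_with_seed(42), sample_with_seed(42))

Most of the examples that follow use exactly this property to make unit tests deterministic.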
Example #1
Source File: glow_ops_test.py From tensor2tensor with Apache License 2.0
def test_invertibility(self, op, name, dropout=0.0):
  with tf.Graph().as_default():
    tf.set_random_seed(42)
    x = tf.random_uniform(shape=(16, 32, 32, 4))

    if op in [glow_ops.affine_coupling, glow_ops.additive_coupling]:
      with arg_scope([glow_ops.get_dropout], init=False):
        x_inv, _ = op(name, x, reverse=False, dropout=dropout)
        x_inv_inv, _ = op(name, x_inv, reverse=True, dropout=dropout)
    else:
      x_inv, _ = op(name, x, reverse=False)
      x_inv_inv, _ = op(name, x_inv, reverse=True)

    with tf.Session() as session:
      session.run(tf.global_variables_initializer())
      diff = session.run(x - x_inv_inv)
      self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
Example #2
Source File: evolved_transformer_test.py From tensor2tensor with Apache License 2.0
def testGreedyFastTPUVsNonTPU(self):
  tf.set_random_seed(1234)
  decode_length = DECODE_LENGTH

  model, features = self._create_greedy_infer_model()

  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    fast_result_non_tpu = model._greedy_infer(
        features, decode_length, use_tpu=False)["outputs"]
    fast_result_tpu = model._greedy_infer(
        features, decode_length, use_tpu=True)["outputs"]

  with self.test_session():
    fast_non_tpu_res = fast_result_non_tpu.eval()
    fast_tpu_res = fast_result_tpu.eval()

  self.assertEqual(fast_tpu_res.shape,
                   (BATCH_SIZE, INPUT_LENGTH + decode_length))
  self.assertAllClose(fast_tpu_res, fast_non_tpu_res)
Example #3
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateSingleclone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, 'GPU:0')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #4
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateOnecloneWithPS(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                  num_ps_tasks=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(clones), 1)
    clone = clones[0]
    self.assertEqual(clone.outputs.op.name,
                     'BatchNormClassifier/fully_connected/Sigmoid')
    self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
    self.assertEqual(clone.scope, '')
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
      self.assertDeviceEqual(v.device, v.value().device)
Example #5
Source File: mnist_benchmark.py From autograph with Apache License 2.0
def get_data_and_params():
  """Set up input dataset and variables."""
  (train_x, train_y), _ = tf.keras.datasets.mnist.load_data()
  tf.set_random_seed(0)
  hparams = contrib_training.HParams(
      batch_size=200,
      learning_rate=0.1,
      train_steps=101,
  )
  dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))
  dataset = dataset.repeat()
  dataset = dataset.shuffle(hparams.batch_size * 10)
  dataset = dataset.batch(hparams.batch_size)

  def reshape_ex(x, y):
    return (tf.to_float(tf.reshape(x, (-1, 28 * 28))) / 256.0,
            tf.one_hot(tf.squeeze(y), 10))

  dataset = dataset.map(reshape_ex)
  w = tf.get_variable('w0', (28 * 28, 10))
  b = tf.get_variable('b0', (10,), initializer=tf.zeros_initializer())
  opt = tf.train.GradientDescentOptimizer(hparams.learning_rate)
  return dataset, opt, hparams, w, b
Example #6
Source File: flop_regularizer_test.py From morph-net with Apache License 2.0
def testFlopRegularizerDontConvertToVariable(self):
  tf.reset_default_graph()
  tf.set_random_seed(1234)

  x = tf.constant(1.0, shape=[2, 6], name='x', dtype=tf.float32)
  w = tf.Variable(tf.truncated_normal([6, 4], stddev=1.0), use_resource=True)
  net = tf.matmul(x, w)

  # Create FLOPs network regularizer.
  threshold = 0.9
  flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([net.op], threshold,
                                                         0)
  with self.cached_session():
    tf.global_variables_initializer().run()
    flop_reg.get_regularization_term().eval()
Example #7
Source File: op_regularizer_manager_test.py From morph-net with Apache License 2.0
def setUp(self):
  super(OpRegularizerManagerTest, self).setUp()
  tf.set_random_seed(12)
  np.random.seed(665544)
  IndexOpRegularizer.reset_index()

  # Create default OpHandler dict for testing.
  self._default_op_handler_dict = collections.defaultdict(
      grouping_op_handler.GroupingOpHandler)
  self._default_op_handler_dict.update({
      'FusedBatchNormV3':
          IndexBatchNormSourceOpHandler(),
      'Conv2D':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'ConcatV2':
          concat_op_handler.ConcatOpHandler(),
      'DepthwiseConv2dNative':
          depthwise_convolution_op_handler.DepthwiseConvolutionOpHandler(),
  })
Example #8
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateSingleclone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    clone = clones[0]
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(clone.outputs.op.name,
                     'BatchNormClassifier/fully_connected/Sigmoid')
    self.assertEqual(clone.scope, '')
    self.assertDeviceEqual(clone.device, 'GPU:0')
    self.assertEqual(len(slim.losses.get_losses()), 1)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)
Example #9
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateLogisticClassifier(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = LogisticClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    clone = clones[0]
    self.assertEqual(len(slim.get_variables()), 2)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(clone.outputs.op.name,
                     'LogisticClassifier/fully_connected/Sigmoid')
    self.assertEqual(clone.scope, '')
    self.assertDeviceEqual(clone.device, 'GPU:0')
    self.assertEqual(len(slim.losses.get_losses()), 1)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(update_ops, [])
Example #10
Source File: dcgan_test.py From models with Apache License 2.0
def test_generator_graph(self):
  tf.set_random_seed(1234)
  # Check graph construction for a number of image size/depths and batch
  # sizes.
  for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
    tf.reset_default_graph()
    final_size = 2 ** i
    noise = tf.random.normal([batch_size, 64])
    image, end_points = dcgan.generator(
        noise,
        depth=32,
        final_size=final_size)

    self.assertAllEqual([batch_size, final_size, final_size, 3],
                        image.shape.as_list())

    expected_names = ['deconv%i' % j for j in xrange(1, i)] + ['logits']
    self.assertSetEqual(set(expected_names), set(end_points.keys()))

    # Check layer depths.
    for j in range(1, i):
      layer = end_points['deconv%i' % j]
      self.assertEqual(32 * 2**(i-j-1), layer.get_shape().as_list()[-1])
Example #11
Source File: preprocessors_test.py From text-to-text-transfer-transformer with Apache License 2.0
def test_noise_token_to_random_token_or_sentinel(self):
  tf.set_random_seed(55)
  vocabulary = test_utils.mock_vocabulary({'foo': 10}, vocab_size=1000)
  tokens = tf.constant(list(range(10)))
  noise_mask = tf.constant(
      [True, True, False, False, True, False, True, True, True, True])
  expected_output = [436, 999, 2, 3, 999, 5, 999, 999, 999, 999]
  output = self.evaluate(prep.noise_token_to_random_token_or_sentinel(
      tokens, noise_mask, vocabulary, random_prob=0.2))
  self.assertAllEqual(output, expected_output)
Example #12
Source File: data_reader_test.py From tensor2tensor with Apache License 2.0
@classmethod
def setUpClass(cls):
  tf.set_random_seed(1)
  cls.problem = registry.problem("test_problem")
  cls.data_dir = tempfile.gettempdir()
  cls.filepatterns = generate_test_data(cls.problem, cls.data_dir)
Example #13
Source File: recom_mlp.py From cornac with Apache License 2.0
def _build_graph(self):
    import tensorflow.compat.v1 as tf

    from .ops import mlp, loss_fn, train_fn

    super()._build_graph()
    with self.graph.as_default():
        tf.set_random_seed(self.seed)

        self.user_id = tf.placeholder(shape=[None], dtype=tf.int32, name="user_id")
        self.item_id = tf.placeholder(shape=[None], dtype=tf.int32, name="item_id")
        self.labels = tf.placeholder(
            shape=[None, 1], dtype=tf.float32, name="labels"
        )

        self.interaction = mlp(
            uid=self.user_id,
            iid=self.item_id,
            num_users=self.num_users,
            num_items=self.num_items,
            layers=self.layers,
            reg_layers=self.reg_layers,
            act_fn=self.act_fn,
            seed=self.seed,
        )
        logits = tf.layers.dense(
            self.interaction,
            units=1,
            name="logits",
            kernel_initializer=tf.initializers.lecun_uniform(self.seed),
        )
        self.prediction = tf.nn.sigmoid(logits)

        self.loss = loss_fn(labels=self.labels, logits=logits)
        self.train_op = train_fn(
            self.loss, learning_rate=self.lr, learner=self.learner
        )

        self.initializer = tf.global_variables_initializer()
        self.saver = tf.train.Saver()

    self._sess_init()
Example #14
Source File: resnet_v2_test.py From models with Apache License 2.0
def testAtrousFullyConvolutionalValues(self):
  """Verify dense feature extraction with atrous convolution."""
  nominal_stride = 32
  for output_stride in [4, 8, 16, 32, None]:
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with tf.Graph().as_default():
        with self.test_session() as sess:
          tf.set_random_seed(0)
          inputs = create_test_input(2, 81, 81, 3)
          # Dense feature extraction followed by subsampling.
          output, _ = self._resnet_small(inputs, None,
                                         is_training=False,
                                         global_pool=False,
                                         output_stride=output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          # Make the two networks use the same weights.
          tf.get_variable_scope().reuse_variables()
          # Feature extraction at the nominal network rate.
          expected, _ = self._resnet_small(inputs, None,
                                           is_training=False,
                                           global_pool=False)
          sess.run(tf.global_variables_initializer())
          self.assertAllClose(output.eval(), expected.eval(),
                              atol=1e-4, rtol=1e-4)
Example #15
Source File: dcgan_test.py From models with Apache License 2.0
def test_generator_run(self):
  tf.set_random_seed(1234)
  noise = tf.random.normal([100, 64])
  image, _ = dcgan.generator(noise)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    image.eval()
Example #16
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateMulticlone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    num_clones = 4
    deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(len(clones), num_clones)
    for i, clone in enumerate(clones):
      self.assertEqual(
          clone.outputs.op.name,
          'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
      self.assertEqual(len(update_ops), 2)
      self.assertEqual(clone.scope, 'clone_%d/' % i)
      self.assertDeviceEqual(clone.device, 'GPU:%d' % i)
Example #17
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateMulticloneWithPS(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=2,
                                                  num_ps_tasks=2)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    for i, v in enumerate(slim.get_variables()):
      t = i % 2
      self.assertDeviceEqual(v.device, '/job:ps/task:%d/device:CPU:0' % t)
      self.assertDeviceEqual(v.device, v.value().device)
    self.assertEqual(len(clones), 2)
    for i, clone in enumerate(clones):
      self.assertEqual(
          clone.outputs.op.name,
          'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
      self.assertEqual(clone.scope, 'clone_%d/' % i)
      self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:%d' % i)
Example #18
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateMulticlone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    num_clones = 4
    deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), num_clones * 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, '')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #19
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateMulticloneCPU(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    model_args = (tf_inputs, tf_labels)
    num_clones = 4
    deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
                                                  clone_on_cpu=True)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), num_clones * 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, '')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #20
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateOnecloneWithPS(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    model_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                  num_ps_tasks=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, '/job:worker/device:GPU:0')
      self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
Example #21
Source File: preprocessors_test.py From text-to-text-transfer-transformer with Apache License 2.0
def test_noise_token_to_gathered_token(self):
  tf.set_random_seed(55)
  vocabulary = test_utils.mock_vocabulary({'foo': 10}, vocab_size=1000)
  tokens = tf.constant([10, 11, 12, 13, 14, 15])
  noise_mask = tf.constant([True, True, False, False, True, False])
  expected_output = [11, 11, 12, 13, 15, 15]
  output = self.evaluate(prep.noise_token_to_gathered_token(
      tokens, noise_mask, vocabulary))
  self.assertAllEqual(output, expected_output)
Example #22
Source File: preprocessors_test.py From text-to-text-transfer-transformer with Apache License 2.0
def test_noise_token_to_random_token(self):
  tf.set_random_seed(55)
  vocabulary = test_utils.mock_vocabulary({'foo': 10}, vocab_size=1000)
  tokens = tf.constant([10, 11, 12, 13, 14, 15])
  noise_mask = tf.constant([True, True, False, False, True, False])
  expected_output = [811, 309, 12, 13, 451, 15]
  output = self.evaluate(prep.noise_token_to_random_token(
      tokens, noise_mask, vocabulary))
  self.assertAllEqual(output, expected_output)
Example #23
Source File: benchmark_cnn_test.py From benchmarks with Apache License 2.0
def _train_and_eval_local(self,
                          params,
                          check_output_values=False,
                          max_final_loss=10.,
                          skip=None,
                          use_test_preprocessor=True):
  # TODO(reedwm): check_output_values should default to True and be enabled
  # on every test. Currently, if check_output_values=True and the calls to
  # tf.set_random_seed(...) and np.seed(...) are passed certain seed values in
  # benchmark_cnn.py, then most tests will fail. This indicates the tests
  # are brittle and could fail with small changes when
  # check_output_values=True, so check_output_values defaults to False for
  # now.

  def run_fn(run_type, inner_params):
    del run_type
    if use_test_preprocessor:
      return [
          self._run_benchmark_cnn_with_black_and_white_images(inner_params)
      ]
    else:
      return [self._run_benchmark_cnn(inner_params)]

  return test_util.train_and_eval(self, run_fn, params,
                                  check_output_values=check_output_values,
                                  max_final_loss=max_final_loss,
                                  skip=skip)
Example #24
Source File: preprocessors_test.py From text-to-text-transfer-transformer with Apache License 2.0
def test_permute_noise_tokens(self):
  tf.set_random_seed(55)
  vocabulary = test_utils.mock_vocabulary({'foo': 10}, vocab_size=1000)
  tokens = tf.constant([10, 11, 12, 13, 14, 15])
  noise_mask = tf.constant([True, True, False, False, True, False])
  expected_output = [11, 14, 12, 13, 10, 15]
  output = self.evaluate(prep.permute_noise_tokens(
      tokens, noise_mask, vocabulary))
  self.assertAllEqual(output, expected_output)
Example #25
Source File: preprocessors_test.py From text-to-text-transfer-transformer with Apache License 2.0
def test_random_spans_noise_mask(self):
  tf.set_random_seed(55)
  length = 32
  noise_density = 0.25
  mean_noise_span_length = 2.0
  # There should be 4 noise spans with a total length of 8.
  noise_mask = prep.random_spans_noise_mask(
      length, noise_density, mean_noise_span_length)
  output = self.evaluate(tf.cast(noise_mask, tf.int32))
  expected_output = [
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1]
  self.assertAllEqual(output, expected_output)
Example #26
Source File: keynet_architecture.py From pyslam with GNU General Public License v3.0
def __init__(self, args, MSIP_sizes=[]):
    # Start Key.Net architecture
    self.pyramid_levels = args.num_levels_within_net
    self.factor_scaling = args.factor_scaling_pyramid
    self.num_blocks = args.num_learnable_blocks
    self.num_filters = args.num_filters
    self.conv_kernel_size = args.conv_kernel_size
    self.ksize = args.nms_size

    self.batch_size = 16
    self.patch_size = 32

    tf.set_random_seed(args.random_seed)
    np.random.seed(args.random_seed)

    name_scope = tf_contrib.framework.get_name_scope()

    # Smooth Gaussian filter
    gaussian_avg = gaussian_multiple_channels(1, 1.5)
    self.gaussian_avg = tf.constant(gaussian_avg, name=name_scope + '_Gaussian_avg')

    # Sobel derivatives
    kernel_x, kernel_y = create_derivatives_kernel()
    self.kernel_filter_dx = tf.constant(kernel_x, name=name_scope + '_kernel_filter_dx')
    self.kernel_filter_dy = tf.constant(kernel_y, name=name_scope + '_kernel_filter_dy')

    # Create kernels
    self.kernels = {}
    if MSIP_sizes != []:
        self.create_kernels(MSIP_sizes, name_scope)

    if 8 not in MSIP_sizes:
        self.create_kernels([8], name_scope)
Example #27
Source File: trainer_lib.py From tensor2tensor with Apache License 2.0
def set_random_seed(seed):
  tf.set_random_seed(seed)
  random.seed(seed)
  np.random.seed(seed)
Example #28
Source File: util.py From nni with MIT License
def set_global_seeds(i):
    """set global seeds"""
    rank = 0
    myseed = i + 1000 * rank if i is not None else None
    tf.set_random_seed(myseed)
    np.random.seed(myseed)
    random.seed(myseed)
Example #29
Source File: evolved_transformer_test.py From tensor2tensor with Apache License 2.0
def testSlowVsFast(self):
  tf.set_random_seed(1234)
  model, features = get_model(transformer.transformer_tiny())
  decode_length = DECODE_LENGTH

  out_logits, _ = model(features)
  out_logits = tf.squeeze(out_logits, axis=[2, 3])
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
      labels=tf.reshape(features["targets"], [-1]))
  loss = tf.reduce_mean(loss)
  apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)

  with self.test_session():
    tf.global_variables_initializer().run()
    for _ in range(10):
      apply_grad.run()

  model.set_mode(tf.estimator.ModeKeys.PREDICT)

  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    greedy_result = model._slow_greedy_infer(
        features, decode_length)["outputs"]
    greedy_result = tf.squeeze(greedy_result, axis=[2, 3])
    fast_result = model._greedy_infer(features, decode_length)["outputs"]

  with self.test_session():
    greedy_res = greedy_result.eval()
    fast_res = fast_result.eval()

  self.assertEqual(fast_res.shape,
                   (BATCH_SIZE, INPUT_LENGTH + decode_length))
  self.assertAllClose(greedy_res, fast_res)
Example #30
Source File: flop_regularizer_test.py From morph-net with Apache License 2.0
def setUp(self):
  super(GammaFlopResidualConnectionsLossTest, self).setUp()
  tf.set_random_seed(7)
  self._threshold = 0.6