Python tensorflow.compat.v1.get_collection() Examples
The following are 30 code examples of tensorflow.compat.v1.get_collection().
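In TF1-style graphs, variables and ops are registered in named collections as a side effect of graph construction, and tf.compat.v1.get_collection(key, scope=None) returns the values stored under key, optionally filtered by a scope-name prefix (interpreted as a regular expression). Before the examples, here is a minimal, self-contained sketch of that behavior; the variable and scope names are illustrative, not taken from the examples below.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # run in graph mode when TF2 is installed

graph = tf.Graph()
with graph.as_default():
  # get_variable registers each variable in GLOBAL_VARIABLES (and, unless
  # trainable=False, in TRAINABLE_VARIABLES) at creation time.
  with tf.variable_scope('encoder'):
    w = tf.get_variable('w', shape=[4, 4])
  with tf.variable_scope('decoder'):
    b = tf.get_variable('b', shape=[4], trainable=False)

  # All global variables in the default graph.
  all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
  # Only variables whose names match the 'encoder' scope prefix.
  encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                   scope='encoder')

  print([v.name for v in all_vars])      # ['encoder/w:0', 'decoder/b:0']
  print([v.name for v in encoder_vars])  # ['encoder/w:0']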
Example #1
Source File: calibration_metrics_tf1_test.py From models with Apache License 2.0
def test_expected_calibration_error_all_bins_filled(self):
  """Test expected calibration error when all bins contain predictions."""
  y_true, y_pred = self._get_calibration_placeholders()
  expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
      y_true, y_pred, nbins=2)
  with self.test_session() as sess:
    metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
    sess.run(tf.variables_initializer(var_list=metrics_vars))
    # Bin calibration errors (|confidence - accuracy| * bin_weight):
    # - [0,0.5): |0.2 - 0.333| * (3/5) = 0.08
    # - [0.5, 1]: |0.75 - 0.5| * (2/5) = 0.1
    sess.run(
        update_op,
        feed_dict={
            y_pred: np.array([0., 0.2, 0.4, 0.5, 1.0]),
            y_true: np.array([0, 0, 1, 0, 1])
        })
    actual_ece = 0.08 + 0.1
    expected_ece = sess.run(expected_ece_op)
    self.assertAlmostEqual(actual_ece, expected_ece)
Example #2
Source File: post_training_quantization.py From models with Apache License 2.0
def restore_model(sess, checkpoint_path, enable_ema=True):
  """Restore variables from the checkpoint into the provided session.

  Args:
    sess: A tensorflow session where the checkpoint will be loaded.
    checkpoint_path: Path to the trained checkpoint.
    enable_ema: (optional) Whether to load the exponential moving average (ema)
      version of the tensorflow variables. Defaults to True.
  """
  if enable_ema:
    ema = tf.train.ExponentialMovingAverage(decay=0.0)
    ema_vars = tf.trainable_variables() + tf.get_collection("moving_vars")
    for v in tf.global_variables():
      if "moving_mean" in v.name or "moving_variance" in v.name:
        ema_vars.append(v)
    ema_vars = list(set(ema_vars))
    var_dict = ema.variables_to_restore(ema_vars)
  else:
    var_dict = None

  sess.run(tf.global_variables_initializer())
  saver = tf.train.Saver(var_dict, max_to_keep=1)
  saver.restore(sess, checkpoint_path)
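Note that "moving_vars" in the example above is a custom, string-keyed collection: values appear under such a key only if some code explicitly registers them. A minimal sketch of the write side, with a hypothetical variable name:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# Hypothetical variable tracked by an exponential moving average.
v = tf.get_variable('shadow_var', shape=[])
tf.add_to_collection('moving_vars', v)           # register under a custom key
assert tf.get_collection('moving_vars') == [v]   # retrieve with the same key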
Example #3
Source File: inception_resnet_v2_test.py From models with Apache License 2.0
def testVariablesSetDevice(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000
  with self.test_session():
    inputs = tf.random.uniform((batch_size, height, width, 3))
    # Force all Variables to reside on the device.
    with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
      inception.inception_resnet_v2(inputs, num_classes)
    with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
      inception.inception_resnet_v2(inputs, num_classes)
    for v in tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
      self.assertDeviceEqual(v.device, '/cpu:0')
    for v in tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
      self.assertDeviceEqual(v.device, '/gpu:0')
Example #4
Source File: nasnet_test.py From models with Apache License 2.0
def testVariablesSetDeviceMobileModel(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000
  inputs = tf.random.uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  # Force all Variables to reside on the device.
  with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
    with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
      nasnet.build_nasnet_mobile(inputs, num_classes)
  with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
    with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
      nasnet.build_nasnet_mobile(inputs, num_classes)
  for v in tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
    self.assertDeviceEqual(v.device, '/cpu:0')
  for v in tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
    self.assertDeviceEqual(v.device, '/gpu:0')
Example #5
Source File: inception_v4_test.py From models with Apache License 2.0
def testVariablesSetDevice(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.random.uniform((batch_size, height, width, 3))
  # Force all Variables to reside on the device.
  with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
    inception.inception_v4(inputs, num_classes)
  with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
    inception.inception_v4(inputs, num_classes)
  for v in tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
    self.assertDeviceEqual(v.device, '/cpu:0')
  for v in tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
    self.assertDeviceEqual(v.device, '/gpu:0')
Example #6
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateLogisticClassifier(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = LogisticClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    clone = clones[0]
    self.assertEqual(len(slim.get_variables()), 2)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(clone.outputs.op.name,
                     'LogisticClassifier/fully_connected/Sigmoid')
    self.assertEqual(clone.scope, '')
    self.assertDeviceEqual(clone.device, 'GPU:0')
    self.assertEqual(len(slim.losses.get_losses()), 1)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(update_ops, [])
Example #7
Source File: transformer.py From mesh with Apache License 2.0
def initialize(self):
  """Initialize the teacher model from the checkpoint.

  This function will be called after the graph has been constructed.
  """
  if self.fraction_soft == 0.0:
    # Do nothing if we do not need the teacher.
    return
  vars_to_restore = tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope="teacher")
  tf.train.init_from_checkpoint(
      self.teacher_checkpoint,
      {v.name[len("teacher/"):].split(":")[0]: v for v in vars_to_restore})


# gin-configurable constructors
Example #8
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateLogisticClassifier(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = LogisticClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 2)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(update_ops, [])

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, 'GPU:0')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #9
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateSingleclone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, 'GPU:0')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #10
Source File: layers_test.py From interval-bound-propagation with Apache License 2.0
def testBatchNormUpdateImproveStatistics(self):
  """Test that updating the moving_mean improves statistics."""
  _, _, inputs = _get_inputs()
  # Use small decay_rate to update faster.
  bn = ibp.BatchNorm(offset=False, scale=False, decay_rate=0.1,
                     update_ops_collection=tf.GraphKeys.UPDATE_OPS)
  out1 = bn(inputs, is_training=False)
  # Build the update ops.
  bn(inputs, is_training=True)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    out_v = sess.run(out1)
    # Before updating the moving_mean the results are off.
    self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 2, 5)
    sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))
    # After updating the moving_mean the results are better.
    out_v = sess.run(out1)
    self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 1, 2)
Example #11
Source File: train_image_classifier.py From models with Apache License 2.0
def _get_variables_to_train():
  """Returns a list of variables to train.

  Returns:
    A list of variables to train by the optimizer.
  """
  if FLAGS.trainable_scopes is None:
    return tf.trainable_variables()
  else:
    scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]

  variables_to_train = []
  for scope in scopes:
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    variables_to_train.extend(variables)
  return variables_to_train
Example #12
Source File: calibration_metrics_tf1_test.py From models with Apache License 2.0
def test_expected_calibration_error_all_bins_not_filled(self):
  """Test expected calibration error when no predictions for one bin."""
  y_true, y_pred = self._get_calibration_placeholders()
  expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
      y_true, y_pred, nbins=2)
  with self.test_session() as sess:
    metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
    sess.run(tf.variables_initializer(var_list=metrics_vars))
    # Bin calibration errors (|confidence - accuracy| * bin_weight):
    # - [0,0.5): |0.2 - 0.333| * (3/5) = 0.08
    # - [0.5, 1]: |0.75 - 0.5| * (2/5) = 0.1
    sess.run(
        update_op,
        feed_dict={
            y_pred: np.array([0., 0.2, 0.4]),
            y_true: np.array([0, 0, 1])
        })
    actual_ece = np.abs(0.2 - (1 / 3.))
    expected_ece = sess.run(expected_ece_op)
    self.assertAlmostEqual(actual_ece, expected_ece)
Example #13
Source File: train_util.py From magenta with Apache License 2.0
def define_train_ops(gan_model, gan_loss, **kwargs):
  """Defines progressive GAN train ops.

  Args:
    gan_model: A `GANModel` namedtuple.
    gan_loss: A `GANLoss` namedtuple.
    **kwargs: A dictionary of
        'adam_beta1': A float of Adam optimizer beta1.
        'adam_beta2': A float of Adam optimizer beta2.
        'generator_learning_rate': A float of generator learning rate.
        'discriminator_learning_rate': A float of discriminator learning rate.

  Returns:
    A tuple of a `GANTrainOps` namedtuple and a list of variables tracking
    the state of optimizers.
  """
  with tf.variable_scope('progressive_gan_train_ops') as var_scope:
    beta1, beta2 = kwargs['adam_beta1'], kwargs['adam_beta2']
    gen_opt = tf.train.AdamOptimizer(kwargs['generator_learning_rate'], beta1,
                                     beta2)
    dis_opt = tf.train.AdamOptimizer(kwargs['discriminator_learning_rate'],
                                     beta1, beta2)
    gan_train_ops = tfgan.gan_train_ops(gan_model, gan_loss, gen_opt, dis_opt)
  return gan_train_ops, tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope.name)
Example #14
Source File: nasnet_test.py From benchmarks with Apache License 2.0
def testVariablesSetDeviceMobileModel(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  # Force all Variables to reside on the device.
  with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
    with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
      nasnet.build_nasnet_mobile(inputs, num_classes)
  with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
    with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
      nasnet.build_nasnet_mobile(inputs, num_classes)
  for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
    self.assertDeviceEqual(v.device, '/cpu:0')
  for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
    self.assertDeviceEqual(v.device, '/gpu:0')
Example #15
Source File: training.py From lamb with Apache License 2.0
def _load_checkpoint(checkpoint_filename, extra_vars, trainable_only=False):
  if tf.gfile.IsDirectory(checkpoint_filename):
    checkpoint_filename = tf.train.latest_checkpoint(checkpoint_filename)
  logging.info('Loading checkpoint %s', checkpoint_filename)
  saveables = (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) +
               tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS))
  if trainable_only:
    saveables = list(set(saveables) & set(tf.trainable_variables()))
  # Try to restore all saveables; if that fails, try without extra_vars.
  try:
    saver = tf.train.Saver(var_list=saveables)
    saver.restore(tf.get_default_session(), checkpoint_filename)
  except (ValueError, tf.errors.NotFoundError):
    logging.info('Missing key in checkpoint. Trying old checkpoint format.')
    saver = tf.train.Saver(var_list=list(set(saveables) - set(extra_vars)))
    saver.restore(tf.get_default_session(), checkpoint_filename)
Example #16
Source File: learner.py From meta-dataset with Apache License 2.0
def compute_loss(self, onehot_labels, predictions):
  """Computes the MSE loss of `predictions` with respect to `onehot_labels`.

  Args:
    onehot_labels: A `tf.Tensor` containing the class labels; each vector
      along the class dimension should hold a valid probability distribution.
    predictions: A `tf.Tensor` containing the class predictions, interpreted
      as unnormalized log probabilities.

  Returns:
    A `tf.Tensor` representing the average loss.
  """
  mse_loss = tf.losses.mean_squared_error(onehot_labels, predictions)
  regularization = tf.reduce_sum(
      tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = mse_loss + self.weight_decay * regularization
  return loss
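For context, the REGULARIZATION_LOSSES collection read above is populated automatically when a variable is created with a regularizer. A minimal sketch of that write side, assuming a Keras L2 regularizer and hypothetical names:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# get_variable applies the regularizer to the variable and adds the
# resulting scalar loss to GraphKeys.REGULARIZATION_LOSSES.
w = tf.get_variable(
    'w', shape=[3, 3],
    regularizer=tf.keras.regularizers.l2(0.01))
print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))  # one scalar tensor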
Example #17
Source File: ssd_meta_arch.py From models with Apache License 2.0
def updates(self):
  """Returns a list of update operators for this model.

  Returns a list of update operators for this model that must be executed at
  each training step. The estimator's train op needs to have a control
  dependency on these updates.

  Returns:
    A list of update operators.
  """
  update_ops = []
  slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  # Copy the slim ops to avoid modifying the collection
  if slim_update_ops:
    update_ops.extend(slim_update_ops)
  if self._box_predictor.is_keras_model:
    update_ops.extend(self._box_predictor.get_updates_for(None))
    update_ops.extend(self._box_predictor.get_updates_for(
        self._box_predictor.inputs))
  if self._feature_extractor.is_keras_model:
    update_ops.extend(self._feature_extractor.get_updates_for(None))
    update_ops.extend(self._feature_extractor.get_updates_for(
        self._feature_extractor.inputs))
  return update_ops
Example #18
Source File: learner.py From meta-dataset with Apache License 2.0
def compute_loss(self, onehot_labels, predictions):
  """Computes the CE loss of `predictions` with respect to `onehot_labels`.

  Args:
    onehot_labels: A `tf.Tensor` containing the class labels; each vector
      along the class dimension should hold a valid probability distribution.
    predictions: A `tf.Tensor` containing the class predictions, interpreted
      as unnormalized log probabilities.

  Returns:
    A `tf.Tensor` representing the average loss.
  """
  cross_entropy_loss = tf.losses.softmax_cross_entropy(
      onehot_labels=onehot_labels, logits=predictions)
  regularization = tf.reduce_sum(
      tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = cross_entropy_loss + self.weight_decay * regularization
  return loss
Example #19
Source File: transformer_vae.py From tensor2tensor with Apache License 2.0
def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
  """Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode."""
  estimator_spec = super(TransformerAE, self).estimator_spec_eval(
      features, logits, labels, loss, losses_dict)
  if common_layers.is_xla_compiled():
    # For TPUs (and XLA more broadly?), do not add summary hooks that depend
    # on losses; they are not supported.
    return estimator_spec

  summary_op = tf.get_collection(tf.GraphKeys.SUMMARIES, scope="losses")
  summary_op.extend(tf.get_collection(tf.GraphKeys.SUMMARIES, scope="loss"))
  summary_op.append(tf.summary.scalar("loss", loss))
  summary_saver_hook = tf.train.SummarySaverHook(
      save_steps=100,
      summary_op=summary_op,
      output_dir=os.path.join(self.hparams.model_dir, "eval"))

  hooks = list(estimator_spec.evaluation_hooks)
  hooks.append(summary_saver_hook)
  return estimator_spec._replace(evaluation_hooks=hooks)
Example #20
Source File: lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py From models with Apache License 2.0
def check_feature_extractor_variables_under_scopes(
    self, depth_multiplier, pad_to_multiple, scope_names):
  g = tf.Graph()
  with g.as_default():
    feature_extractor = self._create_feature_extractor(
        depth_multiplier, pad_to_multiple)
    preprocessed_inputs = tf.placeholder(tf.float32, (4, 320, 320, 3))
    feature_extractor.extract_features(
        preprocessed_inputs, unroll_length=1)
    variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    for variable in variables:
      self.assertTrue(
          any([
              variable.name.startswith(scope_name)
              for scope_name in scope_names
          ]), 'Variable name: ' + variable.name +
          ' is not under any provided scopes: ' + ','.join(scope_names))
Example #21
Source File: faster_rcnn_meta_arch.py From models with Apache License 2.0
def regularization_losses(self):
  """Returns a list of regularization losses for this model.

  Returns a list of regularization losses for this model that the estimator
  needs to use during training/optimization.

  Returns:
    A list of regularization loss tensors.
  """
  all_losses = []
  slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  # Copy the slim losses to avoid modifying the collection
  if slim_losses:
    all_losses.extend(slim_losses)
  # TODO(kaftan): Possibly raise an error if the feature extractors are
  # uninitialized in Keras.
  if self._feature_extractor_for_proposal_features:
    if (self._feature_extractor_for_proposal_features !=
        _UNINITIALIZED_FEATURE_EXTRACTOR):
      all_losses.extend(self._feature_extractor_for_proposal_features.losses)
  if isinstance(self._first_stage_box_predictor_first_conv,
                tf.keras.Model):
    all_losses.extend(
        self._first_stage_box_predictor_first_conv.losses)
  if self._first_stage_box_predictor.is_keras_model:
    all_losses.extend(self._first_stage_box_predictor.losses)
  if self._feature_extractor_for_box_classifier_features:
    if (self._feature_extractor_for_box_classifier_features !=
        _UNINITIALIZED_FEATURE_EXTRACTOR):
      all_losses.extend(
          self._feature_extractor_for_box_classifier_features.losses)
  if self._mask_rcnn_box_predictor:
    if self._mask_rcnn_box_predictor.is_keras_model:
      all_losses.extend(self._mask_rcnn_box_predictor.losses)
  return all_losses
Example #22
Source File: ssd_model.py From benchmarks with Apache License 2.0
def _collect_backbone_vars(self):
  backbone_vars = tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope='.*' + BACKBONE_MODEL_SCOPE_NAME)
  var_list = {}

  # Assume variables in the checkpoint are following the naming convention of
  # a model checkpoint trained with TF official model
  # TODO(haoyuzhang): the following variable name parsing is hacky and easy
  # to break if there is change in naming convention of either benchmarks or
  # official models.
  for v in backbone_vars:
    # conv2d variable example (model <-- checkpoint):
    #   v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel
    if 'conv2d' in v.name:
      re_match = re.search(r'conv(\d+)/conv2d/(.+):', v.name)
      if re_match:
        layer_id = int(re_match.group(1))
        param_name = re_match.group(2)
        vname_in_ckpt = self._var_name_in_official_model_ckpt(
            'conv2d', layer_id, param_name)
        var_list[vname_in_ckpt] = v
    # batchnorm variable example:
    #   v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma
    elif 'batchnorm' in v.name:
      re_match = re.search(r'batchnorm(\d+)/(.+):', v.name)
      if re_match:
        layer_id = int(re_match.group(1))
        param_name = re_match.group(2)
        vname_in_ckpt = self._var_name_in_official_model_ckpt(
            'batch_normalization', layer_id, param_name)
        var_list[vname_in_ckpt] = v

  return var_list
Example #23
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateMulticlone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    num_clones = 4
    deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(len(clones), num_clones)
    for i, clone in enumerate(clones):
      self.assertEqual(
          clone.outputs.op.name,
          'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
      self.assertEqual(len(update_ops), 2)
      self.assertEqual(clone.scope, 'clone_%d/' % i)
      self.assertDeviceEqual(clone.device, 'GPU:%d' % i)
Example #24
Source File: calibration_metrics_tf1_test.py From models with Apache License 2.0
def test_expected_calibration_error_with_multiple_data_streams(self):
  """Test expected calibration error when multiple data batches provided."""
  y_true, y_pred = self._get_calibration_placeholders()
  expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
      y_true, y_pred, nbins=2)
  with self.test_session() as sess:
    metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
    sess.run(tf.variables_initializer(var_list=metrics_vars))
    # Identical data to test_expected_calibration_error_all_bins_filled,
    # except split over three batches.
    sess.run(
        update_op,
        feed_dict={
            y_pred: np.array([0., 0.2]),
            y_true: np.array([0, 0])
        })
    sess.run(
        update_op,
        feed_dict={
            y_pred: np.array([0.4, 0.5]),
            y_true: np.array([1, 0])
        })
    sess.run(
        update_op,
        feed_dict={
            y_pred: np.array([1.0]),
            y_true: np.array([1])
        })
    actual_ece = 0.08 + 0.1
    expected_ece = sess.run(expected_ece_op)
    self.assertAlmostEqual(actual_ece, expected_ece)
Example #25
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateMulticlone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    num_clones = 4
    deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), num_clones * 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, '')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #26
Source File: model_deploy_test.py From models with Apache License 2.0
def testCreateMulticloneCPU(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    model_args = (tf_inputs, tf_labels)
    num_clones = 4
    deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
                                                  clone_on_cpu=True)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), num_clones * 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, '')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #27
Source File: model_deploy.py From models with Apache License 2.0
def _gather_clone_loss(clone, num_clones, regularization_losses):
  """Gather the loss for a single clone.

  Args:
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.

  Returns:
    A tensor for the total loss for the clone. Can be None.
  """
  # The return value.
  sum_loss = None
  # Individual components of the loss that will need summaries.
  clone_loss = None
  regularization_loss = None
  # Compute and aggregate losses on the clone device.
  with tf.device(clone.device):
    all_losses = []
    clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    if clone_losses:
      clone_loss = tf.add_n(clone_losses, name='clone_loss')
      if num_clones > 1:
        clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                            name='scaled_clone_loss')
      all_losses.append(clone_loss)
    if regularization_losses:
      regularization_loss = tf.add_n(regularization_losses,
                                     name='regularization_loss')
      all_losses.append(regularization_loss)
    if all_losses:
      sum_loss = tf.add_n(all_losses)
  # Add the summaries out of the clone device block.
  if clone_loss is not None:
    tf.summary.scalar('/'.join(filter(None,
                                      ['Losses', clone.scope, 'clone_loss'])),
                      clone_loss)
  if regularization_loss is not None:
    tf.summary.scalar('Losses/regularization_loss', regularization_loss)
  return sum_loss
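The GraphKeys.LOSSES collection that _gather_clone_loss reads is filled by the tf.losses functions, which register each loss tensor when it is created. A minimal sketch with made-up values:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

labels = tf.constant([[1.0], [0.0]])
predictions = tf.constant([[0.8], [0.1]])
# tf.losses.* functions add their result to GraphKeys.LOSSES by default.
tf.losses.mean_squared_error(labels, predictions)
print(tf.get_collection(tf.GraphKeys.LOSSES))  # one scalar loss tensor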
Example #28
Source File: visualization_utils_test.py From models with Apache License 2.0
def test_add_hist_image_summary(self):
  def graph_fn():
    values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
    bins = [0.01 * i for i in range(101)]
    visualization_utils.add_hist_image_summary(values, bins,
                                               'ScoresDistribution')
    hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
    return hist_image_summary

  self.execute(graph_fn, [])
Example #29
Source File: trainer.py From meta-dataset with Apache License 2.0
def get_train_op(self, global_step):
  """Returns the operation that performs a training update."""
  # UPDATE_OPS picks up batch_norm updates.
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.control_dependencies(update_ops):
    train_op = self.optimizer.minimize(
        self.losses[TRAIN_SPLIT], global_step=global_step)
  return train_op
Example #30
Source File: visualization_utils_test.py From models with Apache License 2.0
def test_add_cdf_image_summary(self):
  def graph_fn():
    values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
    visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
    cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
    return cdf_image_summary

  self.execute(graph_fn, [])