Python tensorflow.compat.v1.get_default_graph() Examples

The following are 30 code examples of tensorflow.compat.v1.get_default_graph(), drawn from open-source projects. Each example notes its original project and source file. You may also want to look at the other available functions and classes of the tensorflow.compat.v1 module.
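As a warm-up, here is a minimal sketch of the function's basic behavior (assuming TensorFlow 2.x with the v1 compatibility API and eager execution disabled; the op names are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # ops are only recorded on the default graph in graph mode

a = tf.constant(1.0, name='a')
b = tf.add(a, a, name='sum')

graph = tf.get_default_graph()
print([op.name for op in graph.get_operations()])  # ['a', 'sum']
assert b.graph is graph  # every op and tensor records the graph it belongs to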
Example #1
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def underlying_variable(t):
  """Find the underlying tf.Variable object.

  Args:
    t: a Tensor

  Returns:
    tf.Variable.
  """
  t = underlying_variable_ref(t)
  assert t is not None
  # make sure that the graph has a variable index and that it is up-to-date
  if not hasattr(tf.get_default_graph(), "var_index"):
    tf.get_default_graph().var_index = {}
  var_index = tf.get_default_graph().var_index
  for v in tf.global_variables()[len(var_index):]:
    var_index[v.name] = v
  return var_index[t.name] 
Example #2
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def test_group_lasso_conv3d(self):
    shape = [3, 3, 3]
    video = tf.zeros([2, 3, 3, 3, 1])
    net = slim.conv3d(
        video,
        5,
        shape,
        padding='VALID',
        weights_initializer=tf.glorot_normal_initializer(),
        scope='vconv1')
    conv3d_op = tf.get_default_graph().get_operation_by_name('vconv1/Conv3D')
    conv3d_weights = conv3d_op.inputs[1]

    threshold = 0.09
    flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([net.op],
                                                           threshold=threshold)
    norm = tf.sqrt(tf.reduce_mean(tf.square(conv3d_weights), [0, 1, 2, 3]))
    alive = tf.reduce_sum(tf.cast(norm > threshold, tf.float32))
    with self.session():
      flop_coeff = 2 * shape[0] * shape[1] * shape[2]
      tf.compat.v1.global_variables_initializer().run()
      self.assertAllClose(flop_reg.get_cost(), flop_coeff * alive)
      self.assertAllClose(flop_reg.get_regularization_term(),
                          flop_coeff * tf.reduce_sum(norm)) 
Example #3
Source File: ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py    From models with Apache License 2.0
def test_fused_batchnorm(self, use_depthwise):
    use_keras = False
    image_height = 256
    image_width = 256
    depth_multiplier = 1
    pad_to_multiple = 1
    image_placeholder = tf.placeholder(tf.float32,
                                       [1, image_height, image_width, 3])
    feature_extractor = self._create_feature_extractor(
        depth_multiplier,
        pad_to_multiple,
        use_keras=use_keras,
        use_depthwise=use_depthwise)
    preprocessed_image = feature_extractor.preprocess(image_placeholder)
    _ = feature_extractor.extract_features(preprocessed_image)
    self.assertTrue(
        any('FusedBatchNorm' in op.type
            for op in tf.get_default_graph().get_operations())) 
Example #4
Source File: faster_rcnn_resnet_v1_feature_extractor_tf1_test.py    From models with Apache License 2.0
def test_overwriting_activation_fn(self):
    for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']:
      feature_extractor = self._build_feature_extractor(
          first_stage_features_stride=16,
          architecture=architecture,
          activation_fn=tf.nn.relu6)
      preprocessed_inputs = tf.random_uniform([4, 224, 224, 3],
                                              maxval=255,
                                              dtype=tf.float32)
      rpn_feature_map, _ = feature_extractor.extract_proposal_features(
          preprocessed_inputs, scope='TestStage1Scope')
      _ = feature_extractor.extract_box_classifier_features(
          rpn_feature_map, scope='TestStage2Scope')
      conv_ops = [
          op for op in tf.get_default_graph().get_operations()
          if op.type == 'Relu6'
      ]
      op_names = [op.name for op in conv_ops]

      self.assertNotEmpty(conv_ops)
      self.assertIn('TestStage1Scope/resnet_v1_50/resnet_v1_50/conv1/Relu6',
                    op_names)
      self.assertIn(
          'TestStage2Scope/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/Relu6',
          op_names) 
Example #5
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def testLossDecorated(self):
    self.BuildWithBatchNorm(True)
    self.AddRegularizer()
    # Create network regularizer with DummyDecorator op regularization.
    self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
        [self.conv3.op, self.conv4.op],
        gamma_threshold=0.45,
        regularizer_decorator=dummy_decorator.DummyDecorator,
        decorator_parameters={'scale': 0.5})

    all_convs = [
        o for o in tf.get_default_graph().get_operations() if o.type == 'Conv2D'
    ]
    total_reg_term = 1410376.375
    self.assertAllClose(total_reg_term * 0.5, self.GetLoss(all_convs))
    self.assertAllClose(total_reg_term * 0.5, self.GetLoss([])) 
Example #6
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testShareParams(self):
    # Tests reuse option.
    first_outputs = 2
    alternate_num_outputs = 12
    parameterization = {'first/Conv2D': first_outputs}
    decorator = ops.ConfigurableOps(parameterization=parameterization)
    explicit = layers.conv2d(
        self.inputs, first_outputs, 3, scope='first')
    with arg_scope([layers.conv2d], reuse=True):
      decorated = decorator.conv2d(
          self.inputs,
          num_outputs=alternate_num_outputs,
          kernel_size=3,
          scope='first')
    with self.cached_session():
      tf.global_variables_initializer().run()
      # verifies that parameters are shared.
      self.assertAllClose(explicit.eval(), decorated.eval())
    conv_ops = sorted([
        op.name
        for op in tf.get_default_graph().get_operations()
        if op.type == 'Conv2D'
    ])
    self.assertAllEqual(['first/Conv2D', 'first_1/Conv2D'], conv_ops) 
Example #7
Source File: graph_rewriter_builder_tf1_test.py    From models with Apache License 2.0
def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
    with mock.patch.object(
        contrib_quantize,
        'experimental_create_training_graph') as mock_quant_fn:
      with mock.patch.object(slim,
                             'summarize_collection') as mock_summarize_col:
        graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
        graph_rewriter_proto.quantization.delay = 10
        graph_rewriter_proto.quantization.weight_bits = 8
        graph_rewriter_proto.quantization.activation_bits = 8
        graph_rewrite_fn = graph_rewriter_builder.build(
            graph_rewriter_proto, is_training=True)
        graph_rewrite_fn()
        _, kwargs = mock_quant_fn.call_args
        self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
        self.assertEqual(kwargs['quant_delay'], 10)
        mock_summarize_col.assert_called_with('quant_vars') 
Example #8
Source File: lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py    From models with Apache License 2.0
def test_output_nodes_for_tflite(self):
    image_height = 64
    image_width = 64
    depth_multiplier = 1.0
    pad_to_multiple = 1
    image_placeholder = tf.placeholder(tf.float32,
                                       [1, image_height, image_width, 3])
    feature_extractor = self._create_feature_extractor(depth_multiplier,
                                                       pad_to_multiple)
    preprocessed_image = feature_extractor.preprocess(image_placeholder)
    _ = feature_extractor.extract_features(preprocessed_image, unroll_length=1)

    tflite_nodes = [
        'raw_inputs/init_lstm_c',
        'raw_inputs/init_lstm_h',
        'raw_inputs/base_endpoint',
        'raw_outputs/lstm_c',
        'raw_outputs/lstm_h',
        'raw_outputs/base_endpoint_1',
        'raw_outputs/base_endpoint_2'
    ]
    ops_names = [op.name for op in tf.get_default_graph().get_operations()]
    for node in tflite_nodes:
      self.assertTrue(any(node in s for s in ops_names)) 
Example #9
Source File: lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py    From models with Apache License 2.0
def test_fixed_concat_nodes(self):
    image_height = 64
    image_width = 64
    depth_multiplier = 1.0
    pad_to_multiple = 1
    image_placeholder = tf.placeholder(tf.float32,
                                       [1, image_height, image_width, 3])
    feature_extractor = self._create_feature_extractor(
        depth_multiplier, pad_to_multiple, is_quantized=True)
    preprocessed_image = feature_extractor.preprocess(image_placeholder)
    _ = feature_extractor.extract_features(preprocessed_image, unroll_length=1)

    concat_nodes = [
        'MobilenetV2_1/expanded_conv_16/project/Relu6',
        'MobilenetV2_2/expanded_conv_16/project/Relu6'
    ]
    ops_names = [op.name for op in tf.get_default_graph().get_operations()]
    for node in concat_nodes:
      self.assertTrue(any(node in s for s in ops_names)) 
Example #10
Source File: graph_rewriter_builder.py    From models with Apache License 2.0
def build(graph_rewriter_config, is_training):
  """Returns a function that modifies default graph based on options.

  Args:
    graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
    is_training: whether in training of eval mode.
  """
  def graph_rewrite_fn():
    """Function to quantize weights and activation of the default graph."""
    if (graph_rewriter_config.quantization.weight_bits != 8 or
        graph_rewriter_config.quantization.activation_bits != 8):
      raise ValueError('Only 8bit quantization is supported')

    # Quantize the graph by inserting quantize ops for weights and activations
    if is_training:
      contrib_quantize.experimental_create_training_graph(
          input_graph=tf.get_default_graph(),
          quant_delay=graph_rewriter_config.quantization.delay
      )
    else:
      contrib_quantize.experimental_create_eval_graph(
          input_graph=tf.get_default_graph()
      )
    slim.summarize_collection('quant_vars')

  return graph_rewrite_fn 
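A hedged usage sketch of the builder above (the proto field names match the tests elsewhere on this page; the delay value is illustrative). The returned function must be called only after the model has been constructed, since it rewrites whatever is in the default graph at call time:

proto = graph_rewriter_pb2.GraphRewriter()
proto.quantization.delay = 2000  # illustrative quantization delay
proto.quantization.weight_bits = 8
proto.quantization.activation_bits = 8
graph_rewrite_fn = build(proto, is_training=True)
# ... build the model into the default graph here ...
graph_rewrite_fn()  # inserts fake-quantization ops into the default graph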
Example #11
Source File: graph_rewriter_builder_tf1_test.py    From models with Apache License 2.0
def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
    with mock.patch.object(contrib_quantize,
                           'experimental_create_eval_graph') as mock_quant_fn:
      with mock.patch.object(slim,
                             'summarize_collection') as mock_summarize_col:
        graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
        graph_rewriter_proto.quantization.delay = 10
        graph_rewrite_fn = graph_rewriter_builder.build(
            graph_rewriter_proto, is_training=False)
        graph_rewrite_fn()
        _, kwargs = mock_quant_fn.call_args
        self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
        mock_summarize_col.assert_called_with('quant_vars') 
Example #12
Source File: utils.py    From s4l with Apache License 2.0
def import_graph(checkpoint_dir):
  """Imports the tf graph from latest checkpoint in checkpoint_dir."""
  checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
  tf.train.import_meta_graph(checkpoint + ".meta", clear_devices=True)
  return tf.get_default_graph() 
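A hedged usage sketch (the checkpoint directory and tensor name are illustrative): once the meta graph is imported, tensors can be looked up by name on the returned graph.

graph = import_graph('/tmp/my_experiment')  # hypothetical checkpoint dir
logits = graph.get_tensor_by_name('logits:0')  # hypothetical tensor name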
Example #13
Source File: generate_detection_data_tf1_test.py    From models with Apache License 2.0
def _export_saved_model(self):
    tmp_dir = self.get_temp_dir()
    checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path)
    output_directory = os.path.join(tmp_dir, 'output')
    saved_model_path = os.path.join(output_directory, 'saved_model')
    tf.io.gfile.makedirs(output_directory)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=5)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      detection_model = model_builder.build(pipeline_config.model,
                                            is_training=False)
      outputs, placeholder_tensor = exporter.build_detection_graph(
          input_type='tf_example',
          detection_model=detection_model,
          input_shape=None,
          output_collection_name='inference_op',
          graph_hook_fn=None)
      output_node_names = ','.join(outputs.keys())
      saver = tf.train.Saver()
      input_saver_def = saver.as_saver_def()
      frozen_graph_def = exporter.freeze_graph_with_def_protos(
          input_graph_def=tf.get_default_graph().as_graph_def(),
          input_saver_def=input_saver_def,
          input_checkpoint=checkpoint_path,
          output_node_names=output_node_names,
          restore_op_name='save/restore_all',
          filename_tensor_name='save/Const:0',
          output_graph='',
          clear_devices=True,
          initializer_nodes='')
      exporter.write_saved_model(
          saved_model_path=saved_model_path,
          frozen_graph_def=frozen_graph_def,
          inputs=placeholder_tensor,
          outputs=outputs)
      return saved_model_path 
Example #14
Source File: mobilenet_defs_test.py    From models with Apache License 2.0
def _assert_contains_op(self, op_name):
    op_names = [op.name for op in tf.get_default_graph().get_operations()]
    self.assertIn(op_name, op_names) 
Example #15
Source File: utils_test.py    From models with Apache License 2.0
def test_quantizable_concat_is_training(self):
    inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32)
    inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32)
    concat_in_train = utils.quantizable_concat([inputs_1, inputs_2],
                                               axis=3,
                                               is_training=True)
    self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list())
    self._check_min_max_ema(tf.get_default_graph())
    self._check_min_max_vars(tf.get_default_graph()) 
Example #16
Source File: mobilenet_v3_test.py    From models with Apache License 2.0
def testMobilenetV3WithOutReduceMean(self, use_groupnorm):
    _, _ = mobilenet_v3.mobilenet(
        tf.placeholder(tf.float32, (1, 224, 224, 3)),
        conv_defs=mobilenet_v3.V3_SMALL,
        use_groupnorm=use_groupnorm,
        use_reduce_mean_for_pooling=False)
    g = tf.get_default_graph()
    reduce_mean = [v for v in g.get_operations() if 'ReduceMean' in v.name]
    self.assertEmpty(reduce_mean)
    self.assertVariablesHaveNormalizerFn(use_groupnorm) 
Example #17
Source File: mobilenet_v3_test.py    From models with Apache License 2.0
def testMobilenetV3WithReduceMean(self, use_groupnorm):
    _, _ = mobilenet_v3.mobilenet(
        tf.placeholder(tf.float32, (1, 224, 224, 3)),
        conv_defs=mobilenet_v3.V3_SMALL,
        use_groupnorm=use_groupnorm,
        use_reduce_mean_for_pooling=True)
    g = tf.get_default_graph()
    reduce_mean = [v for v in g.get_operations() if 'ReduceMean' in v.name]
    self.assertNotEmpty(reduce_mean)
    self.assertVariablesHaveNormalizerFn(use_groupnorm) 
Example #18
Source File: utils_test.py    From models with Apache License 2.0
def test_quantizable_concat_inference(self):
    inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32)
    inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32)
    concat_in_eval = utils.quantizable_concat([inputs_1, inputs_2],
                                              axis=3,
                                              is_training=False)
    self.assertAllEqual([4, 10, 10, 3], concat_in_eval.shape.as_list())
    self._check_no_min_max_ema(tf.get_default_graph())
    self._check_min_max_vars(tf.get_default_graph()) 
Example #19
Source File: convolutional_box_predictor_tf1_test.py    From models with Apache License 2.0
def test_no_dangling_outputs(self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            dropout_keep_prob=0.8,
            kernel_size=3,
            box_code_size=4,
            use_dropout=True,
            use_depthwise=True))
    box_predictions = conv_box_predictor.predict(
        [image_features], num_predictions_per_location=[5],
        scope='BoxPredictor')
    tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)

    bad_dangling_ops = []
    types_safe_to_dangle = set(['Assign', 'Mul', 'Const'])
    for op in tf.get_default_graph().get_operations():
      if (not op.outputs) or (not op.outputs[0].consumers()):
        if 'BoxPredictor' in op.name:
          if op.type not in types_safe_to_dangle:
            bad_dangling_ops.append(op)

    self.assertEqual(bad_dangling_ops, []) 
Example #20
Source File: mobilenet_v2_test.py    From models with Apache License 2.0
def find_ops(optype):
  """Finds ops of a given type in the default graph.

  Args:
    optype: operation type string (e.g., 'Conv2D').

  Returns:
    List of operations of the given type in the default graph.
  """
  graph = tf.get_default_graph()
  return [op for op in graph.get_operations() if op.type == optype] 
Example #21
Source File: model_statistics.py    From keras-YOLOv3-model-set with MIT License
def get_flops(model):
    run_meta = tf.RunMetadata()
    graph = tf.get_default_graph()

    # We use the Keras session graph in the call to the profiler.
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(graph=graph, run_meta=run_meta, cmd='op', options=opts)

    opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
    params = tf.profiler.profile(graph=graph, run_meta=run_meta, cmd='op', options=opts)

    print('Total FLOPs: {}m float_ops'.format(flops.total_float_ops/1e6))
    print('Total PARAMs: {}m'.format(params.total_parameters/1e6)) 
Example #22
Source File: seq2seq.py    From magenta with Apache License 2.0
def _should_cache_variables():
  """Returns True if a default caching device should be set, otherwise False."""
  # Don't set a caching device when running in a loop, since it is possible that
  # train steps could be wrapped in a tf.while_loop. In that scenario caching
  # prevents forward computations in loop iterations from re-reading the
  # updated weights.
  graph = tf.get_default_graph()
  ctxt = graph._get_control_flow_context()  # pylint: disable=protected-access
  in_v1_while_loop = (
      control_flow_util.GetContainingWhileContext(ctxt) is not None)
  return not in_v1_while_loop 
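For context, a hedged sketch of how such a helper is typically consumed (the scope name is illustrative): a caching device is set on a variable scope only when it is safe to do so.

with tf.variable_scope('decoder') as varscope:  # hypothetical scope
  if _should_cache_variables() and varscope.caching_device is None:
    # Cache each variable on the device where it is first read.
    varscope.set_caching_device(lambda op: op.device)
  # ... build the recurrent computation under this scope ...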
Example #23
Source File: utils.py    From s4l with Apache License 2.0
def assert_not_in_graph(tensor_name, graph=None):
  # Call get_default_graph() inside the function body instead of using it as a
  # default parameter value: a default would be evaluated once at definition
  # time and could capture the wrong graph.
  if graph is None:
    graph = tf.get_default_graph()
  node_names = [node.name for node in graph.as_graph_def().node]

  assert tensor_name not in node_names, "%s already exists." % tensor_name 
Example #24
Source File: tfci.py    From compression with Apache License 2.0
def instantiate_signature(signature_def):
  """Fetches tensors defined in a signature from the graph."""
  graph = tf.get_default_graph()
  inputs = {
      k: graph.get_tensor_by_name(v.name)
      for k, v in signature_def.inputs.items()
  }
  outputs = {
      k: graph.get_tensor_by_name(v.name)
      for k, v in signature_def.outputs.items()
  }
  return inputs, outputs 
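A hedged usage sketch (the file name and signature key are assumptions): the signature can only be instantiated after the corresponding MetaGraphDef has been imported into the current default graph.

metagraph = tf.MetaGraphDef()
with tf.io.gfile.GFile('model.metagraph', 'rb') as f:  # hypothetical path
  metagraph.ParseFromString(f.read())
with tf.Graph().as_default():
  tf.train.import_meta_graph(metagraph)
  # 'sender' is an assumed signature key for illustration.
  inputs, outputs = instantiate_signature(metagraph.signature_def['sender'])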
Example #25
Source File: runner_lib.py    From recsim with Apache License 2.0
def _set_up(self, eval_mode):
    """Sets up the runner by creating and initializing the agent."""
    # Reset the tf default graph to avoid name collisions from previous runs
    # before doing anything else.
    tf.reset_default_graph()
    self._summary_writer = tf.summary.FileWriter(self._output_dir)
    if self._episode_log_file:
      self._episode_writer = tf.io.TFRecordWriter(
          os.path.join(self._output_dir, self._episode_log_file))
    # Set up a session and initialize variables.
    self._sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    self._agent = self._create_agent_fn(
        self._sess,
        self._env,
        summary_writer=self._summary_writer,
        eval_mode=eval_mode)
    # type check: env/agent must both be multi- or single-user
    if self._agent.multi_user and not isinstance(
        self._env.environment, environment.MultiUserEnvironment):
      raise ValueError('Multi-user agent requires multi-user environment.')
    if not self._agent.multi_user and isinstance(
        self._env.environment, environment.MultiUserEnvironment):
      raise ValueError('Single-user agent requires single-user environment.')
    self._summary_writer.add_graph(graph=tf.get_default_graph())
    self._sess.run(tf.global_variables_initializer())
    self._sess.run(tf.local_variables_initializer()) 
Example #26
Source File: utils.py    From mesh with Apache License 2.0
def remove_summaries():
  """Remove summaries from the default graph."""
  g = tf.get_default_graph()
  key = 'mtf_scalar_summaries'
  tf.logging.debug('Remove summaries %s' % str(g.get_collection(key)))
  del g.get_collection_ref(key)[:]
  assert not g.get_collection(key) 
Example #27
Source File: resnet.py    From tensor2robot with Apache License 2.0
def resnet_endpoints(model):
  """Extract intermediate values from ResNet model."""
  graph = tf.get_default_graph()
  scope = _get_resnet_scope()
  end_points = {}
  tensors = ['initial_conv', 'initial_max_pool', 'pre_final_pool',
             'final_reduce_mean', 'final_dense']
  tensors += [
      'block_layer{}'.format(i + 1) for i in range(len(model.block_sizes))]
  for name in tensors:
    tensor = graph.get_tensor_by_name('{}{}:0'.format(scope, name))
    if len(tensor.shape) == 4:
      tensor = _model_output(tensor, model.data_format)
    end_points[name] = tensor
  return end_points 
Example #28
Source File: resnet.py    From tensor2robot with Apache License 2.0
def _get_resnet_scope():
  scope = tf.get_default_graph().get_name_scope()
  if scope:
    scope += '/'
  return scope + 'resnet_model/' 
Example #29
Source File: utils.py    From Object_Detection_Tracking with Apache License 2.0
def num_params_flops(readable_format=True):
  """Return number of parameters and flops."""
  nparams = np.sum(
      [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
  options = tf.profiler.ProfileOptionBuilder.float_operation()
  options['output'] = 'none'
  flops = tf.profiler.profile(
      tf.get_default_graph(), options=options).total_float_ops
  # We use flops to denote multiply-adds, which is counted as 2 ops in tfprof.
  flops = flops // 2
  if readable_format:
    nparams = float(nparams) * 1e-6
    flops = float(flops) * 1e-9
  return nparams, flops 
Example #30
Source File: mobilenet_test.py    From benchmarks with Apache License 2.0
def find_ops(optype):
  """Finds ops of a given type in the default graph.

  Args:
    optype: operation type string (e.g., 'Conv2D').

  Returns:
    List of operations of the given type in the default graph.
  """
  graph = tf.get_default_graph()
  return [op for op in graph.get_operations() if op.type == optype]