Python tensorflow.python.lib.io.file_io.read_file_to_string() Examples

The following are 30 code examples of tensorflow.python.lib.io.file_io.read_file_to_string(), collected from open-source projects. The source file and originating project are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.lib.io.file_io.
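Before the examples, a minimal sketch of the call itself, using placeholder paths: read_file_to_string() returns a str by default and bytes when binary_mode=True, and write_string_to_file() is its writing counterpart. Both accept local and gs:// paths.

from tensorflow.python.lib.io import file_io

# Write a small text file, then read it back (the path is a placeholder).
file_io.write_string_to_file('/tmp/example.txt', 'hello\n')
text = file_io.read_file_to_string('/tmp/example.txt')  # str

# Binary mode returns bytes, e.g. for images or pickled data.
raw = file_io.read_file_to_string('/tmp/example.txt', binary_mode=True)  # bytes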
Example #1
Source File: test_cloud_workflow.py    From pydatalab with Apache License 2.0
def _run_batch_prediction(self):
    """Run batch prediction using the cloudml engine prediction service.

    There is no local version of this step as it's the last step.
    """

    job_name = 'test_mltoolbox_batchprediction_%s' % uuid.uuid4().hex
    cmd = ['gcloud ml-engine jobs submit prediction ' + job_name,
           '--data-format=TEXT',
           '--input-paths=' + self._csv_predict_filename,
           '--output-path=' + self._prediction_output,
           '--model-dir=' + os.path.join(self._train_output, 'model'),
           '--runtime-version=1.0',
           '--region=us-central1']
    self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
    subprocess.check_call(' '.join(cmd), shell=True)  # Returns once the job is submitted; the job runs asynchronously.
    subprocess.check_call('gcloud ml-engine jobs stream-logs ' + job_name, shell=True)

    # Check that there were no errors.
    error_files = file_io.get_matching_files(
        os.path.join(self._prediction_output, 'prediction.errors_stats*'))
    self.assertEqual(1, len(error_files))
    error_str = file_io.read_file_to_string(error_files[0])
    self.assertEqual('', error_str) 
Example #2
Source File: projector_plugin.py    From lambda-packs with MIT License
def _latest_checkpoints_changed(configs, run_path_pairs):
  """Returns true if the latest checkpoint has changed in any of the runs."""
  for run_name, assets_dir in run_path_pairs:
    if run_name not in configs:
      config = projector_config_pb2.ProjectorConfig()
      config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        file_content = file_io.read_file_to_string(config_fpath)
        text_format.Merge(file_content, config)
    else:
      config = configs[run_name]

    # See if you can find a checkpoint file in the logdir.
    logdir = _assets_dir_to_logdir(assets_dir)
    ckpt_path = _find_latest_checkpoint(logdir)
    if not ckpt_path:
      continue
    if config.model_checkpoint_path != ckpt_path:
      return True
  return False 
Example #3
Source File: task.py    From pydatalab with Apache License 2.0
def local_analysis(args):
  if args.analysis:
    # Already analyzed.
    return

  if not args.schema or not args.features:
    raise ValueError('Either --analysis, or both --schema and --features must be provided.')

  tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_spec = tf_config.get('cluster', {})
  if len(cluster_spec.get('worker', [])) > 0:
    raise ValueError('If "schema" and "features" are provided, local analysis will run and ' +
                     'only BASIC scale-tier (no workers node) is supported.')

  if cluster_spec and not (args.schema.startswith('gs://') and args.features.startswith('gs://')):
    raise ValueError('Cloud trainer requires GCS paths for --schema and --features.')

  print('Running analysis.')
  schema = json.loads(file_io.read_file_to_string(args.schema).decode())
  features = json.loads(file_io.read_file_to_string(args.features).decode())
  args.analysis = os.path.join(args.job_dir, 'analysis')
  args.transform = True
  file_io.recursive_create_dir(args.analysis)
  feature_analysis.run_local_analysis(args.analysis, args.train, schema, features)
  print('Analysis done.') 
Example #4
Source File: saved_model_test.py    From keras-lambda with MIT License
def _validate_asset_collection(self, export_dir, graph_collection_def,
                                 expected_asset_file_name,
                                 expected_asset_file_contents,
                                 expected_asset_tensor_name):
    assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    assets_any[0].Unpack(asset)
    assets_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    actual_asset_contents = file_io.read_file_to_string(assets_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(actual_asset_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name) 
Example #5
Source File: saved_model_test.py    From auto-alt-text-lambda-api with MIT License
def _validate_asset_collection(self, export_dir, graph_collection_def,
                                 expected_asset_file_name,
                                 expected_asset_file_contents,
                                 expected_asset_tensor_name):
    assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    assets_any[0].Unpack(asset)
    assets_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    actual_asset_contents = file_io.read_file_to_string(assets_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(actual_asset_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name) 
Example #6
Source File: saved_model_test.py    From deep_image_model with Apache License 2.0
def _validate_asset_collection(self, export_dir, graph_collection_def,
                                 expected_asset_file_name,
                                 expected_asset_file_contents,
                                 expected_asset_tensor_name):
    assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    assets_any[0].Unpack(asset)
    assets_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    actual_asset_contents = file_io.read_file_to_string(assets_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(actual_asset_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name) 
Example #7
Source File: task.py    From pydatalab with Apache License 2.0
def local_analysis(args):
  if args.analysis:
    # Already analyzed.
    return

  if not args.schema or not args.features:
    raise ValueError('Either --analysis, or both --schema and --features must be provided.')

  tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_spec = tf_config.get('cluster', {})
  if len(cluster_spec.get('worker', [])) > 0:
    raise ValueError('If "schema" and "features" are provided, local analysis will run and ' +
                     'only BASIC scale-tier (no workers node) is supported.')

  if cluster_spec and not (args.schema.startswith('gs://') and args.features.startswith('gs://')):
    raise ValueError('Cloud trainer requires GCS paths for --schema and --features.')

  print('Running analysis.')
  schema = json.loads(file_io.read_file_to_string(args.schema).decode())
  features = json.loads(file_io.read_file_to_string(args.features).decode())
  args.analysis = os.path.join(args.job_dir, 'analysis')
  args.transform = True
  file_io.recursive_create_dir(args.analysis)
  feature_analysis.run_local_analysis(args.analysis, args.train, schema, features)
  print('Analysis done.') 
Example #8
Source File: util.py    From pydatalab with Apache License 2.0
def get_vocabulary(preprocess_output_dir, name):
  """Loads the vocabulary file as a list of strings.

  Args:
    preprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name.
    name: Name of the CSV column.

  Returns:
    List of strings.

  Raises:
    ValueError: if file is missing.
  """
  vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)
  if not file_io.file_exists(vocab_file):
    raise ValueError('File %s not found in %s' %
                     (CATEGORICAL_ANALYSIS % name, preprocess_output_dir))

  labels = python_portable_string(
      file_io.read_file_to_string(vocab_file)).split('\n')
  label_values = [x for x in labels if x]  # remove empty lines

  return label_values 
Example #9
Source File: taxi.py    From code-snippets with Apache License 2.0
def read_schema(path):
  """Reads a schema from the provided location.

  Args:
    path: The location of the file holding a serialized Schema proto.

  Returns:
    An instance of Schema parsed from the file at the given path.
  """
  result = schema_pb2.Schema()
  contents = file_io.read_file_to_string(path)
  text_format.Parse(contents, result)
  return result 
Example #10
Source File: _local_predict.py    From pydatalab with Apache License 2.0
def get_model_schema_and_features(model_dir):
  """Get a local model's schema and features config.

  Args:
    model_dir: local or GCS path of a model.
  Returns:
    A tuple of schema (list) and features config (dict).
  """
  schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')
  schema = json.loads(file_io.read_file_to_string(schema_file))
  features_file = os.path.join(model_dir, 'assets.extra', 'features.json')
  features_config = json.loads(file_io.read_file_to_string(features_file))
  return schema, features_config 
Example #11
Source File: meta_graph.py    From deep_image_model with Apache License 2.0
def _read_file(filename):
  """Reads a file containing `GraphDef` and returns the protocol buffer.

  Args:
    filename: `graph_def` filename including the path.

  Returns:
    A `GraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  graph_def = graph_pb2.GraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    graph_def.ParseFromString(file_content)
    return graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return graph_def 
Example #12
Source File: meta_graph.py    From deep_image_model with Apache License 2.0
def read_meta_graph_file(filename):
  """Reads a file containing `MetaGraphDef` and returns the protocol buffer.

  Args:
    filename: `meta_graph_def` filename including the path.

  Returns:
    A `MetaGraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    meta_graph_def.ParseFromString(file_content)
    return meta_graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return meta_graph_def 
Example #13
Source File: evaluator.py    From moonlight with Apache License 2.0
def evaluate(self, ground_truth):
    expected = file_io.read_file_to_string(ground_truth.ground_truth_filename)
    score = self.omr.run(
        page_spec.filename for page_spec in ground_truth.page_spec)
    actual = conversions.score_to_musicxml(score)
    return musicxml.musicxml_similarity(actual, expected) 
Example #14
Source File: evaluator.py    From moonlight with Apache License 2.0
def main(argv):
  if len(argv) <= 1:
    raise ValueError('Ground truth filenames are required')
  evaluator = Evaluator()
  for ground_truth_file in argv[1:]:
    truth = groundtruth_pb2.GroundTruth()
    text_format.Parse(file_io.read_file_to_string(ground_truth_file), truth)
    print(truth.title)
    print(evaluator.evaluate(truth)) 
Example #15
Source File: transform_run.py    From pipelines with Apache License 2.0
def load_schema(analysis_path):
  type_map = {
    'KEY': StringType(),
    'NUMBER': DoubleType(),
    'CATEGORY': StringType(),
    'TEXT': StringType(),
    'IMAGE_URL': StringType()
  }
  schema_file = os.path.join(analysis_path, 'schema.json')
  schema_json = json.loads(file_io.read_file_to_string(schema_file))
  fields = [StructField(x['name'], type_map[x['type']]) for x in schema_json]
  return schema_json, StructType(fields) 
Example #16
Source File: analyze_run.py    From pipelines with Apache License 2.0
def load_schema(schema_file):
  type_map = {
    'KEY': StringType(),
    'NUMBER': DoubleType(),
    'CATEGORY': StringType(),
    'TEXT': StringType(),
    'IMAGE_URL': StringType()
  }
  
  schema_json = json.loads(file_io.read_file_to_string(schema_file))
  fields = [StructField(x['name'], type_map[x['type']]) for x in schema_json]
  return schema_json, StructType(fields) 
Example #17
Source File: saver.py    From lingvo with Apache License 2.0
def _GetState(self):
    """Returns the latest checkpoint id."""
    state = CheckpointState()
    if file_io.file_exists(self._state_file):
      content = file_io.read_file_to_string(self._state_file)
      text_format.Merge(content, state)
    return state 
Example #18
Source File: taxi_schema.py    From code-snippets with Apache License 2.0
def read_schema(path):
  """Reads a schema from the provided location.

  Args:
    path: The location of the file holding a serialized Schema proto.

  Returns:
    An instance of Schema parsed from the file at the given path.
  """
  result = schema_pb2.Schema()
  contents = file_io.read_file_to_string(path)
  text_format.Parse(contents, result)
  return result 
Example #19
Source File: predict.py    From GarvinBook with MIT License
def load_batch(fpath):
    # Read raw bytes (binary_mode=True) so pickle.loads receives bytes,
    # and avoid shadowing the built-in `object`.
    raw = file_io.read_file_to_string(fpath, binary_mode=True)
    if sys.version_info > (3, 0):
        # Python 3: pickled CIFAR-style batches need latin1 decoding.
        d = pickle.loads(raw, encoding='latin1')
    else:
        # Python 2
        d = pickle.loads(raw)
    data = d["data"]
    labels = d["labels"]
    return data, labels 
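A hypothetical call of the function above, assuming fpath points at a CIFAR-10-style pickled batch; the bucket path is a placeholder.

# Hypothetical usage; requires load_batch() from the example above.
data, labels = load_batch('gs://my-bucket/cifar-10-batches-py/data_batch_1')
print(data.shape)  # (10000, 3072) for a standard CIFAR-10 batch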
Example #20
Source File: train.py    From GarvinBook with MIT License
def load_batch(fpath):
    # Read raw bytes (binary_mode=True) so pickle.loads receives bytes,
    # and avoid shadowing the built-in `object`.
    raw = file_io.read_file_to_string(fpath, binary_mode=True)
    if sys.version_info > (3, 0):
        # Python 3: pickled CIFAR-style batches need latin1 decoding.
        d = pickle.loads(raw, encoding='latin1')
    else:
        # Python 2
        d = pickle.loads(raw)
    data = d["data"]
    labels = d["labels"]
    return data, labels 
Example #21
Source File: utils.py    From fritz-models with MIT License
def load_image(
        filename,
        height,
        width,
        expand_dims=False):
    """Load an image and transform it to a specific size.

    Optionally, preprocess the image through the VGG preprocessor.

    Args:
        filename (TYPE): Description
        height (TYPE): Description
        width (TYPE): Description
        expand_dims (bool, optional): Description
        filename - an image file to load
        height - the height of the transformed image
        width - the width of the transformed image
        vgg_preprocess - if True, preprocess the image for a VGG network.
        expand_dims - Add an addition dimension (B, H, W, C), useful for
                      feeding models.

    Returns:
        img - a numpy array representing the image.
    """
    img = file_io.read_file_to_string(filename, binary_mode=True)
    img = PIL.Image.open(io.BytesIO(img))
    img = img.resize((width, height), resample=PIL.Image.BILINEAR)
    img = numpy.array(img)[:, :, :3]

    if expand_dims:
        img = numpy.expand_dims(img, axis=0)

    return img 
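A hypothetical call of load_image() above; the filename is a placeholder.

# Hypothetical usage; requires load_image() from the example above.
img = load_image('photo.jpg', height=256, width=256, expand_dims=True)
print(img.shape)  # (1, 256, 256, 3)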
Example #22
Source File: estimator_test.py    From estimator with Apache License 2.0
def test_checkpoint_contains_relative_paths(self):
    tmpdir = tempfile.mkdtemp()
    est = estimator.EstimatorV2(
        model_dir=tmpdir, model_fn=model_fn_global_step_incrementer)
    est.train(dummy_input_fn, steps=5)

    checkpoint_file_content = file_io.read_file_to_string(
        os.path.join(tmpdir, 'checkpoint'))
    ckpt = checkpoint_state_pb2.CheckpointState()
    text_format.Merge(checkpoint_file_content, ckpt)
    self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
    # TODO(b/78461127): Please modify tests to not directly rely on names of
    # checkpoints.
    self.assertAllEqual(['model.ckpt-0', 'model.ckpt-5'],
                        ckpt.all_model_checkpoint_paths) 
Example #23
Source File: meta_graph.py    From keras-lambda with MIT License
def _read_file(filename):
  """Reads a file containing `GraphDef` and returns the protocol buffer.

  Args:
    filename: `graph_def` filename including the path.

  Returns:
    A `GraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  graph_def = graph_pb2.GraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    graph_def.ParseFromString(file_content)
    return graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return graph_def 
Example #24
Source File: meta_graph.py    From keras-lambda with MIT License
def read_meta_graph_file(filename):
  """Reads a file containing `MetaGraphDef` and returns the protocol buffer.

  Args:
    filename: `meta_graph_def` filename including the path.

  Returns:
    A `MetaGraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    meta_graph_def.ParseFromString(file_content)
    return meta_graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return meta_graph_def 
Example #25
Source File: plugin.py    From keras-lambda with MIT License
def _read_latest_config_files(self, run_path_pairs):
    """Reads and returns the projector config files in every run directory."""
    configs = {}
    config_fpaths = {}
    for run_name, logdir in run_path_pairs:
      config = ProjectorConfig()
      config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
        text_format.Merge(file_content, config)

      has_tensor_files = False
      for embedding in config.embeddings:
        if embedding.tensor_path:
          has_tensor_files = True
          break

      if not config.model_checkpoint_path:
        # See if you can find a checkpoint file in the logdir.
        ckpt_path = latest_checkpoint(logdir)
        if not ckpt_path:
          # Or in the parent of logdir.
          ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
          if not ckpt_path and not has_tensor_files:
            continue
        if ckpt_path:
          config.model_checkpoint_path = ckpt_path

      # Sanity check for the checkpoint file.
      if (config.model_checkpoint_path and
          not checkpoint_exists(config.model_checkpoint_path)):
        logging.warning('Checkpoint file %s not found',
                        config.model_checkpoint_path)
        continue
      configs[run_name] = config
      config_fpaths[run_name] = config_fpath
    return configs, config_fpaths 
Example #26
Source File: local_preprocess.py    From pydatalab with Apache License 2.0
def run_analysis(args):
  """Builds an analysis files for training."""

  # Read the schema and input feature types
  schema_list = json.loads(
      file_io.read_file_to_string(args.schema_file))

  run_numerical_categorical_analysis(args, schema_list)

  # Also save a copy of the schema in the output folder.
  file_io.copy(args.schema_file,
               os.path.join(args.output_dir, SCHEMA_FILE),
               overwrite=True) 
Example #27
Source File: tf_schema_utils.py    From spotify-tensorflow with Apache License 2.0
def parse_schema_txt_file(schema_path):  # type: (str) -> Schema
    """
    Parse a tf.metadata Schema txt file into its in-memory representation.
    """
    assert file_io.file_exists(schema_path), "File not found: {}".format(schema_path)
    schema = Schema()
    schema_text = file_io.read_file_to_string(schema_path)
    google.protobuf.text_format.Parse(schema_text, schema)
    return schema 
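A hypothetical round trip for parse_schema_txt_file(), assuming a minimal tf.metadata Schema in protobuf text format and the file_io import from the example above; the path and feature name are placeholders.

# Write a tiny Schema in text format, then parse it back.
schema_txt = 'feature { name: "fare" type: FLOAT }'
file_io.write_string_to_file('/tmp/schema.pbtxt', schema_txt)
schema = parse_schema_txt_file('/tmp/schema.pbtxt')
print(schema.feature[0].name)  # fare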
Example #28
Source File: projector_plugin.py    From lambda-packs with MIT License
def _read_latest_config_files(self, run_path_pairs):
    """Reads and returns the projector config files in every run directory."""
    configs = {}
    config_fpaths = {}
    for run_name, assets_dir in run_path_pairs:
      config = projector_config_pb2.ProjectorConfig()
      config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        file_content = file_io.read_file_to_string(config_fpath)
        text_format.Merge(file_content, config)
      has_tensor_files = False
      for embedding in config.embeddings:
        if embedding.tensor_path:
          if not embedding.tensor_name:
            embedding.tensor_name = os.path.basename(embedding.tensor_path)
          has_tensor_files = True
          break

      if not config.model_checkpoint_path:
        # See if you can find a checkpoint file in the logdir.
        logdir = _assets_dir_to_logdir(assets_dir)
        ckpt_path = _find_latest_checkpoint(logdir)
        if not ckpt_path and not has_tensor_files:
          continue
        if ckpt_path:
          config.model_checkpoint_path = ckpt_path

      # Sanity check for the checkpoint file.
      if (config.model_checkpoint_path and
          not checkpoint_exists(config.model_checkpoint_path)):
        logging.warning('Checkpoint file "%s" not found',
                        config.model_checkpoint_path)
        continue
      configs[run_name] = config
      config_fpaths[run_name] = config_fpath
    return configs, config_fpaths 
Example #29
Source File: meta_graph.py    From auto-alt-text-lambda-api with MIT License
def _read_file(filename):
  """Reads a file containing `GraphDef` and returns the protocol buffer.

  Args:
    filename: `graph_def` filename including the path.

  Returns:
    A `GraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  graph_def = graph_pb2.GraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    graph_def.ParseFromString(file_content)
    return graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return graph_def 
Example #30
Source File: meta_graph.py    From auto-alt-text-lambda-api with MIT License
def read_meta_graph_file(filename):
  """Reads a file containing `MetaGraphDef` and returns the protocol buffer.

  Args:
    filename: `meta_graph_def` filename including the path.

  Returns:
    A `MetaGraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    meta_graph_def.ParseFromString(file_content)
    return meta_graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return meta_graph_def