Python absl.logging.fatal() Examples

The following are 30 code examples of absl.logging.fatal(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module absl.logging, or try the search function.
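Before diving into the project code, a minimal sketch of the typical pattern may help: absl.logging.fatal() takes printf-style format arguments like the other absl logging functions, logs the message at FATAL severity, and then aborts the process, so it is reserved for unrecoverable conditions. The script and file name below are hypothetical and purely illustrative, not taken from any of the projects listed.

import os

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  config_path = 'config.yaml'  # Hypothetical path, for illustration only.
  if not os.path.exists(config_path):
    # Logs at FATAL severity and terminates the process; anything after
    # this call is never reached.
    logging.fatal('Required config file does not exist: %s', config_path)
  logging.info('Found config at %s', config_path)


if __name__ == '__main__':
  app.run(main)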
Example #1
Source File: retrain.py    From hub with Apache License 2.0 6 votes
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  logging.debug('Creating bottleneck at %s', bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not tf.gfile.Exists(image_path):
    logging.fatal('File does not exist %s', image_path)
  image_data = tf.gfile.GFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor)
  except Exception as e:
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with tf.gfile.GFile(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string) 
Example #2
Source File: ner.py    From fancy-nlp with GNU General Public License v3.0 6 votes
def score(self, data: List[List[str]], labels: List[List[str]]) -> float:
        """Evaluate the performance of ner model with given data and labels, return the f1 score.

        Args:
            data: List of List of str. List of tokenized (in char level) texts ,
                like ``[['我', '在', '上', '海', '上', '学'], ...]``.
            labels: List of List of str. The corresponding labels , usually in BIO or BIOES
                format, like ``[['O', 'O', 'B-LOC', 'I-LOC', 'O', 'O'], ...]``.

        Returns:
            Float. The F1 score.

        """
        if self.trainer:
            return self.trainer.evaluate(data, labels)
        else:
            logging.fatal('Trainer is None! Call fit() or load() to get trainer.') 
Example #3
Source File: __init__.py    From abseil-py with Apache License 2.0 6 votes
def find_log_dir(log_dir=None):
  """Returns the most suitable directory to put log files into.

  Args:
    log_dir: str|None, if specified, the logfile(s) will be created in that
        directory.  Otherwise if the --log_dir command-line flag is provided,
        the logfile will be created in that directory.  Otherwise the logfile
        will be created in a standard location.
  """
  # Get a list of possible log dirs (will try to use them in order).
  if log_dir:
    # log_dir was explicitly specified as an arg, so use it and it alone.
    dirs = [log_dir]
  elif FLAGS['log_dir'].value:
    # log_dir flag was provided, so use it and it alone (this mimics the
    # behavior of the same flag in logging.cc).
    dirs = [FLAGS['log_dir'].value]
  else:
    dirs = ['/tmp/', './']

  # Find the first usable log dir.
  for d in dirs:
    if os.path.isdir(d) and os.access(d, os.W_OK):
      return d
  _absl_logger.fatal("Can't find a writable directory for logs, tried %s", dirs) 
Example #4
Source File: ner.py    From fancy-nlp with GNU General Public License v3.0 6 votes
def predict_batch(self, texts: Union[List[str], List[List[str]]]) -> List[List[str]]:
        """Return the tag sequences of given batch of texts predicted by the ner model

        Args:
            texts: List of str or List of List of str. Can be a batch of un-tokenized texts,
                like ``['我在上海上学', ...]`` or a batch of tokenized (in char level) text sequences,
                like ``[['我', '在', '上', '海', '上', '学'], ...]``.

        Returns:
            List of List of str. The tag sequences, like ``[['O', 'O', 'B-LOC', 'I-LOC', 'O',
            'O']]``

        """
        if self.predictor:
            return self.predictor.tag_batch(texts)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #5
Source File: __init__.py    From abseil-py with Apache License 2.0 6 votes
def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid strings values are case-insensitive 'debug',
        'info', 'warning', 'error', and 'fatal'; valid integer values are
        logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
      ValueError: Raised when s is an invalid value.
  """
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
  elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
  else:
    raise ValueError(
        'set_stderrthreshold only accepts integer absl logging level '
        'from -3 to 1, or case-insensitive string values '
        "'debug', 'info', 'warning', 'error', and 'fatal'. "
        'But found "{}" ({}).'.format(s, type(s))) 
Example #6
Source File: __init__.py    From abseil-py with Apache License 2.0 6 votes
def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged,
  and all messages of level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string values
        are those that can be coerced to an integer as well as case-insensitive
        'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    new_level = int(v)
  except ValueError:
    new_level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = new_level 
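Examples #5 and #6 show the internals of set_stderrthreshold() and set_verbosity(). As a rough illustration (this snippet is mine, not from abseil-py), application code usually calls these public helpers once after flags are parsed, for example inside the function passed to app.run():

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  # Accepts an integer level or a case-insensitive name such as 'info'.
  logging.set_verbosity(logging.INFO)
  # Only records at ERROR severity or above are emitted to stderr.
  logging.set_stderrthreshold('error')
  logging.info('logging configured')


if __name__ == '__main__':
  app.run(main)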
Example #7
Source File: __init__.py    From abseil-py with Apache License 2.0 6 votes
def value(self, v):
    if v in _CPP_LEVEL_TO_NAMES:
      # --stderrthreshold also accepts numeric strings whose values are
      # Abseil C++ log levels.
      cpp_value = int(v)
      v = _CPP_LEVEL_TO_NAMES[v]  # Normalize to strings.
    elif v.lower() in _CPP_NAME_TO_LEVELS:
      v = v.lower()
      if v == 'warn':
        v = 'warning'  # Use 'warning' as the canonical name.
      cpp_value = int(_CPP_NAME_TO_LEVELS[v])
    else:
      raise ValueError(
          '--stderrthreshold must be one of (case-insensitive) '
          "'debug', 'info', 'warning', 'error', 'fatal', "
          "or '0', '1', '2', '3', not '%s'" % v)

    self._value = v 
Example #8
Source File: cnn_dailymail.py    From datasets with Apache License 2.0 6 votes
def _find_files(dl_paths, publisher, url_dict):
  """Find files corresponding to urls."""
  if publisher == 'cnn':
    top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
  elif publisher == 'dm':
    top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
  else:
    logging.fatal('Unsupported publisher: %s', publisher)
  files = tf.io.gfile.listdir(top_dir)

  ret_files = []
  for p in files:
    basename = os.path.basename(p)
    if basename[0:basename.find('.story')] in url_dict:
      ret_files.append(os.path.join(top_dir, p))
  return ret_files 
Example #9
Source File: utils.py    From EfficientNet-PyTorch with Apache License 2.0 6 votes
def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9):
  """Build optimizer."""
  if optimizer_name == 'sgd':
    logging.info('Using SGD optimizer')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  elif optimizer_name == 'momentum':
    logging.info('Using Momentum optimizer')
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=momentum)
  elif optimizer_name == 'rmsprop':
    logging.info('Using RMSProp optimizer')
    optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
                                          epsilon)
  else:
    logging.fatal('Unknown optimizer: %s', optimizer_name)

  return optimizer 
Example #10
Source File: generate_to_file.py    From mathematics_dataset with Apache License 2.0 6 votes
def main(unused_argv):
  generate.init_modules(FLAGS.train_split)

  output_dir = os.path.expanduser(FLAGS.output_dir)
  if os.path.exists(output_dir):
    logging.fatal('output dir %s already exists', output_dir)
  logging.info('Writing to %s', output_dir)
  os.makedirs(output_dir)

  for regime, flat_modules in six.iteritems(generate.filtered_modules):
    regime_dir = os.path.join(output_dir, regime)
    os.mkdir(regime_dir)
    per_module = generate.counts[regime]
    for module_name, module in six.iteritems(flat_modules):
      path = os.path.join(regime_dir, module_name + '.txt')
      with open(path, 'w') as text_file:
        for _ in range(per_module):
          problem, _ = generate.sample_from_module(module)
          text_file.write(str(problem.question) + '\n')
          text_file.write(str(problem.answer) + '\n')
      logging.info('Written %s', path) 
Example #11
Source File: retrain.py    From hub with Apache License 2.0 5 votes
def get_image_path(image_lists, label_name, index, image_dir, category):
  """Returns a path to an image for a label at the given index.

  Args:
    image_lists: OrderedDict of training images for each label.
    label_name: Label string we want to get an image for.
    index: Int offset of the image we want. This will be moduloed by the
    available number of images for the label, so it can be arbitrarily large.
    image_dir: Root folder string of the subfolders containing the training
    images.
    category: Name string of set to pull images from - training, testing, or
    validation.

  Returns:
    File system path string to an image that meets the requested parameters.

  """
  if label_name not in image_lists:
    logging.fatal('Label does not exist %s.', label_name)
  label_lists = image_lists[label_name]
  if category not in label_lists:
    logging.fatal('Category does not exist %s.', category)
  category_list = label_lists[category]
  if not category_list:
    logging.fatal('Label %s has no images in the category %s.',
                  label_name, category)
  mod_index = index % len(category_list)
  base_name = category_list[mod_index]
  sub_dir = label_lists['dir']
  full_path = os.path.join(image_dir, sub_dir, base_name)
  return full_path 
Example #12
Source File: spm.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def predict_batch(self, test_texts: Tuple[List[str], List[str]]) -> List[str]:
        """Return predictions of the model for test data

        Args:
            test_texts: list of untokenized text pairs

        Returns:

        """
        if self.predictor:
            return self.predictor.matching_batch(test_texts)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #13
Source File: spm.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def analyze(self, text: Tuple[str, str]) -> Tuple[str, np.ndarray]:
        """Analyze text and return matching result with probability.

        Args:
            text: a pair of untokenized text
        Returns:

        """
        if self.predictor:
            return self.predictor.matching_with_prob(text)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #14
Source File: spm.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def analyze_batch(self, texts: Tuple[List[str], List[str]]) -> List[Tuple[str, np.ndarray]]:
        """Analyze text and return matching result with probability.

        Args:
            texts: list of untokenized text pairs
        Returns:

        """
        if self.predictor:
            return self.predictor.matching_with_prob_batch(texts)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #15
Source File: text_classification.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def predict(self, test_text):
        """Return prediction of the model for test data

        Args:
            test_text: untokenized text or tokenized (in char level) text

        Returns:

        """
        if self.predictor:
            return self.predictor.classify(test_text)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #16
Source File: text_classification.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def predict_batch(self, test_texts):
        """Return predictions of the model for test data

        Args:
            test_texts: list of untokenized texts or tokenized (in char level) texts

        Returns:

        """
        if self.predictor:
            return self.predictor.classify_batch(test_texts)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #17
Source File: text_classification.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def analyze(self, text):
        """Analyze text and return classification result with probability.

        Args:
            text: untokenized text or tokenized (in char level) text
        Returns:

        """
        if self.predictor:
            return self.predictor.classification_with_prob(text)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #18
Source File: text_classification.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def analyze_batch(self, texts):
        """Analyze text and return classification result with probability.

        Args:
            texts: untokenized texts or tokenized (in char level) texts
        Returns:

        """
        if self.predictor:
            return self.predictor.classification_with_prob_batch(texts)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #19
Source File: logging_test.py    From abseil-py with Apache License 2.0 5 votes
def test_log_fatal_with_python(self):
    with mock.patch.object(self.logger, 'log'):
      self.logger.fatal(self.message)
      self.logger.log.assert_called_once_with(std_logging.FATAL, self.message) 
Example #20
Source File: core.py    From dm_control with Apache License 2.0 5 votes
def _error_callback(message):
  logging.fatal(util.to_native_string(message))


# Override MuJoCo's callbacks for handling warnings and errors. 
Example #21
Source File: utils.py    From text-to-text-transfer-transformer with Apache License 2.0 5 votes
def files(self, split):
    """Returns set of instructions for reading TFDS files for the dataset."""
    split = self._map_split(split)

    if "/" not in self.name and self.builder.BUILDER_CONFIGS:
      # If builder has multiple configs, and no particular config was
      # requested, raise an error.
      raise ValueError("Dataset '%s' has multiple configs." % self.name)

    split_info = self.builder.info.splits[split]
    files = split_info.file_instructions

    if not files:
      logging.fatal("No TFRecord files found for dataset: %s", self.name)
    return files 
Example #22
Source File: ner.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def analyze(self, text: Union[str, List[str]]) -> Dict[str, Any]:
        """Analyze the tagging result of given text predicted by the ner model and return the
        result in pretty format with detailed information.

        Args:
            text: str or List of str. Can be a un-tokenized text, like ``'我在上海上学'`` or a
                tokenized (in char level) text sequence, like ``['我', '在', '上', '海', '上', '学']``.

        Returns:
            A Dict including the original text and list of recognized entities with detailed
            information (name, type, score, offset). Specifically, it will be like:
            {'text': '我在上海上学',
             'entities': [{'name': '上海',
                           'type': 'LOC',
                           'score': 0.9986118674278259,
                           'beginOffset': 2,
                           'endOffset': 4
                           }]
            }

        Notes:
            the score of entity is the probability of being a named-entity, it is computed by
            taking the average the probability of all the tokens within the entity, which is
            predicted by the ner model. However, if one use crf layer at the last layer of ner
            model, the score will be always 1. This is because the viterbi algorithm used by crf
            will output a definite best path instead of probability distribution.

        """
        if self.predictor:
            return self.predictor.pretty_tag(text)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #23
Source File: ner.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def analyze_batch(self, texts: Union[List[str], List[List[str]]]) -> List[Dict[str, Any]]:
        """Analyze the tagging results of given batch of text predicted by the ner model and
        return the results in pretty format with detailed information.

        Args:
            texts: List of str or List of List of str. Can be a batch of un-tokenized texts,
                like ``['我在上海上学', ...]`` or a batch of tokenized (in char level) text sequences,
                like ``[['我', '在', '上', '海', '上', '学'], ...]``.

        Returns:
            List of Dict. Each Dict contain the tagging results of one text, including the original
            text and list of recognized entities with detailed information (name, type, score,
            offset). Specifically, it will be like:
            [{'text': '我在上海上学',
             'entities': [{'name': '上海',
                           'type': 'LOC',
                           'score': 0.9986118674278259,
                           'beginOffset': 2,
                           'endOffset': 4
                           }]
             }
             ...
            ]

        Notes:
            The score of entity is the probability of being a named-entity, it is computed by
            taking the average the probability of all the tokens within the entity, which is
            predicted by the ner model. However, if one use crf layer at the last layer of ner
            model, the score will be always 1. This is because the viterbi algorithm used by crf
            will output a definite best path instead of probability distribution.

        """
        if self.predictor:
            return self.predictor.pretty_tag_batch(texts)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #24
Source File: ner.py    From fancy-nlp with GNU General Public License v3.0 5 votes
def restrict_analyze(self,
                         text: Union[str, List[str]],
                         threshold: float = 0.85) -> Dict[str, Any]:
        """Analyze the tagging result of given text predicted by the ner model and then remove some
        recognized entities such that 1) all entities's scores are higher than threshold; 2)
        for each entity type, only keep the entity with the highest score. After that, return the
        recognized result in pretty format with detailed information.

        Args:
            text: str or List of str. Can be a un-tokenized text, like ``'我在上海上学'`` or a
                tokenized (in char level) text sequence, like ``['我', '在', '上', '海', '上', '学']``.
            threshold: float. The scores of recognized entities must be higher than threshold.

        Returns:
            A Dict including the original text and list of recognized entities with detailed
            information (name, type, score, offset). Specifically, it will be like:
            {'text': '我在上海上学',
             'entities': [{'name': '上海',
                           'type': 'LOC',
                           'score': 0.9986118674278259,
                           'beginOffset': 2,
                           'endOffset': 4
                           }]
            }

        Notes:
            The score of entity is the probability of being a named-entity, it is computed by
            taking the average the probability of all the tokens within the entity, which is
            predicted by the ner model. However, if one use crf layer at the last layer of ner
            model, the score will be always 1. This is because the viterbi algorithm used by crf
            will output a definite best path instead of probability distribution. As a result,
            we do not recommend you use this function when using crf layer.

        """
        if self.predictor:
            return self.predictor.restrict_tag(text, threshold)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Example #25
Source File: logging_functional_test_helper.py    From abseil-py with Apache License 2.0 5 votes
def _test_fatal_main_thread_only():
  """Test logging.fatal from main thread, no other threads running."""
  v = VerboseDel('fatal_main_thread_only main del called\n')
  try:
    logging.fatal('fatal_main_thread_only message')
  finally:
    del v 
Example #26
Source File: trax.py    From BERT with Apache License 2.0 5 votes
def _reshape_by_device_single(x, n_devices):
  """Reshape x into a shape [n_devices, ...]."""
  x_shape = list(x.shape)
  batch_size = x_shape[0]
  batch_size_per_device = batch_size // n_devices
  # We require that n_devices divides batch_size evenly.
  if batch_size_per_device * n_devices != batch_size:
    logging.fatal(
        "We require that n_devices[%d] divides batch_size[%d] evenly.",
        n_devices, batch_size)
  # New shape.
  new_shape_prefix = [n_devices, batch_size_per_device]
  return np.reshape(x, new_shape_prefix + x_shape[1:]) 
Example #27
Source File: accumulator_impl.py    From qkeras with Apache License 2.0 5 votes
def __init__(
      self,
      kernel_shape,
      multiplier: multiplier_impl.IMultiplier,
  ):
    super().__init__()

    if len(kernel_shape) not in (
        2,
        4,
    ):
      logging.fatal(
          "unsupported kernel shape, "
          "it is neither a dense kernel of length 2,"
          " nor a convolution kernel of length 4")

    kernel_shape_excluding_output_dim = kernel_shape[:-1]
    kernel_add_ops = np.prod(kernel_shape_excluding_output_dim)

    # bias are associate with filters; each filter adds 1 bias
    bias_add = 1

    add_ops = kernel_add_ops + bias_add
    self.log_add_ops = int(np.ceil(np.log2(add_ops)))

    self.multiplier = multiplier
    self.output = quantizer_impl.QuantizedBits()
    self.output.bits = self.log_add_ops + self.multiplier.output.bits
    self.output.int_bits = self.log_add_ops + self.multiplier.output.int_bits
    self.output.is_signed = self.multiplier.output.is_signed
    self.output.op_type = "accumulator"

    assert not self.multiplier.output.is_floating_point
    self.output.is_floating_point = False 
Example #28
Source File: cnn_dailymail.py    From datasets with Apache License 2.0 5 votes
def _subset_filenames(dl_paths, split):
  """Get filenames for a particular split."""
  assert isinstance(dl_paths, dict), dl_paths
  # Get filenames for a split.
  if split == tfds.Split.TRAIN:
    urls = _get_url_hashes(dl_paths['train_urls'])
  elif split == tfds.Split.VALIDATION:
    urls = _get_url_hashes(dl_paths['val_urls'])
  elif split == tfds.Split.TEST:
    urls = _get_url_hashes(dl_paths['test_urls'])
  else:
    logging.fatal('Unsupported split: %s', split)
  cnn = _find_files(dl_paths, 'cnn', urls)
  dm = _find_files(dl_paths, 'dm', urls)
  return cnn + dm 
Example #29
Source File: sun.py    From datasets with Apache License 2.0 5 votes
def _decode_image(fobj, session, filename):
  """Reads and decodes an image from a file object as a Numpy array.

  The SUN dataset contains images in several formats (despite the fact that
  all of them have .jpg extension). Some of them are:
    - BMP (RGB)
    - PNG (grayscale, RGBA, RGB interlaced)
    - JPEG (RGB)
    - GIF (1-frame RGB)
  Since TFDS assumes that all images have the same number of channels, we
  convert all of them to RGB.

  Args:
    fobj: File object to read from.
    session: TF session used to decode the images.
    filename: Filename of the original image in the archive.

  Returns:
    Numpy array with shape (height, width, channels).
  """

  buf = fobj.read()
  image = tfds.core.lazy_imports.cv2.imdecode(
      np.fromstring(buf, dtype=np.uint8), flags=3)  # Note: Converts to RGB.
  if image is None:
    logging.warning(
        "Image %s could not be decoded by OpenCV, falling back to TF", filename)
    try:
      image = tf.image.decode_image(buf, channels=3)
      image = session.run(image)
    except tf.errors.InvalidArgumentError:
      logging.fatal("Image %s could not be decoded by Tensorflow", filename)

  # The GIF images contain a single frame.
  if len(image.shape) == 4:  # rank=4 -> rank=3
    image = image.reshape(image.shape[1:])

  return image 
Example #30
Source File: emotion_solver.py    From delta with Apache License 2.0 5 votes
def infer(self, yield_single_examples=False):
    ''' inference '''
    logging.fatal("Not Implemented")