Python absl.logging.debug() Examples

The following are 30 code examples of absl.logging.debug(), taken from open-source projects. You can go to the original project or source file by following the links above each example, or browse the other available functions and classes of the module absl.logging.
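Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the program structure and messages are illustrative) showing how absl.logging.debug() is typically enabled and called:

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  # debug() output is discarded unless verbosity is raised to DEBUG,
  # either here or with the --verbosity command-line flag.
  logging.set_verbosity(logging.DEBUG)
  logging.debug('value of x: %s', 42)


if __name__ == '__main__':
  app.run(main)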
Example #1
Source File: register.py    From delta with Apache License 2.0    6 votes
def import_all_modules_for_register(config=None, only_nlp=False):
  """Import all modules for register."""
  if only_nlp:
    all_modules = ALL_NLP_MODULES
  else:
    all_modules = ALL_MODULES

  add_custom_modules(all_modules, config)

  logging.debug(f"All modules: {all_modules}")
  errors = []
  for base_dir, modules in all_modules:
    for name in modules:
      try:
        if base_dir != "":
          full_name = base_dir + "." + name
        else:
          full_name = name
        importlib.import_module(full_name)
        logging.debug(f"{full_name} loaded.")
      except ImportError as error:
        errors.append((name, error))
  _handle_errors(errors) 
Example #2
Source File: driver.py    From tfx with Apache License 2.0    6 votes
def resolve_exec_properties(
      self,
      exec_properties: Dict[Text, Any],
      pipeline_info: data_types.PipelineInfo,
      component_info: data_types.ComponentInfo,
  ) -> Dict[Text, Any]:
    """Overrides BaseDriver.resolve_exec_properties()."""
    del pipeline_info, component_info

    input_config = example_gen_pb2.Input()
    json_format.Parse(exec_properties[utils.INPUT_CONFIG_KEY], input_config)

    input_base = exec_properties[utils.INPUT_BASE_KEY]
    logging.debug('Processing input %s.', input_base)

    # Note that this function updates the input_config.splits.pattern.
    fingerprint, select_span = utils.calculate_splits_fingerprint_and_span(
        input_base, input_config.splits)

    exec_properties[utils.INPUT_CONFIG_KEY] = json_format.MessageToJson(
        input_config, sort_keys=True, preserving_proto_field_name=True)
    exec_properties[utils.SPAN_PROPERTY_NAME] = select_span
    exec_properties[utils.FINGERPRINT_PROPERTY_NAME] = fingerprint

    return exec_properties 
Example #3
Source File: adder_factory.py    From qkeras with Apache License 2.0    6 votes
def make_quantizer(self, quantizer_1: quantizer_impl.IQuantizer,
                     quantizer_2: quantizer_impl.IQuantizer):
    """make adder quantizer."""

    self.quantizer_1 = quantizer_1
    self.quantizer_2 = quantizer_2

    mode1 = quantizer_1.mode
    mode2 = quantizer_2.mode

    adder_impl_class = self.adder_impl_table[mode1][mode2]
    logging.debug(
        "qbn adder implemented as class %s",
        adder_impl_class.implemented_as())

    return adder_impl_class(
        quantizer_1,
        quantizer_2
    ) 
Example #4
Source File: speech_feature.py    From delta with Apache License 2.0    6 votes
def extract_feature(*wavefiles, **kwargs):
  ''' tensorflow fbank feat '''
  dry_run = kwargs.get('dry_run')
  feat_name = kwargs.get('feature_name', 'fbank')
  assert feat_name

  graph, (input_tensor, output_tensor) = _freq_feat_graph(feat_name, **kwargs)
  sess = _get_session(_get_out_tensor_name(feat_name, 0), graph)

  for wavpath in wavefiles:
    savepath = os.path.splitext(wavpath)[0] + '.npy'
    logging.debug('extract_feat: input: {}, output: {}'.format(
        wavpath, savepath))

    feat = sess.run(output_tensor, feed_dict={input_tensor: wavpath})

    # save feat
    if dry_run:
      logging.info('save feat: path {} shape:{} dtype:{}'.format(
          savepath, feat.shape, feat.dtype))
    else:
      np.save(savepath, feat) 
Example #5
Source File: emotion_solver.py    From delta with Apache License 2.0    6 votes
def create_serving_input_receiver_fn(self):
    ''' infer input pipeline '''
    # with batch_size
    taskconf = self.config['data']['task']
    shape = [None] + taskconf['audio']['feature_shape']
    logging.debug('serving input shape:{}'.format(shape))

    #pylint: disable=no-member
    return tf.estimator.export.build_raw_serving_input_receiver_fn(
        features={
            'inputs':
                tf.placeholder(name="inputs", shape=shape, dtype=tf.float32),
            'texts':
                tf.placeholder(
                    name="texts",
                    shape=(None, taskconf['text']['max_text_len']),
                    dtype=tf.int32)
        },
        default_batch_size=None,
    ) 
Example #6
Source File: tracing.py    From federated with Apache License 2.0    6 votes
def span(
      self,
      scope: str,
      sub_scope: str,
      nonce: int,
      parent_span_yield: Optional[None],
      fn_args: Optional[Tuple[Any, ...]],
      fn_kwargs: Optional[Dict[str, Any]],
      trace_opts: Dict[str, Any],
  ) -> Generator[None, TraceResult, None]:
    assert parent_span_yield is None
    del parent_span_yield, fn_args, fn_kwargs, trace_opts
    start_time = time.time()
    logging.debug('(%s) Entering %s.%s', nonce, scope, sub_scope)
    yield None
    logging.debug('(%s) Exiting %s.%s. Elapsed time %f', nonce, scope,
                  sub_scope,
                  time.time() - start_time) 
Example #7
Source File: lan_sc2_env.py    From pysc2 with Apache License 2.0    6 votes
def tcp_server(tcp_addr, settings):
  """Start up the tcp server, send the settings."""
  family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
  sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
  sock.bind(tcp_addr)
  sock.listen(1)
  logging.info("Waiting for connection on %s", tcp_addr)
  conn, addr = sock.accept()
  logging.info("Accepted connection from %s", Addr(*addr[:2]))

  # Send map_data independently for py2/3 and json encoding reasons.
  write_tcp(conn, settings["map_data"])
  send_settings = {k: v for k, v in settings.items() if k != "map_data"}
  logging.debug("settings: %s", send_settings)
  write_tcp(conn, json.dumps(send_settings).encode())
  return conn 
Example #8
Source File: interest_exploration.py    From recsim with Apache License 2.0    6 votes
def __init__(self,
               user_type_distribution=(0.3, 0.7),
               user_document_mean_affinity_matrix=((.1, .7), (.7, .1)),
               user_document_stddev_affinity_matrix=((.1, .1), (.1, .1)),
               user_ctor=IEUserState,
               **kwargs):
    self._number_of_user_types = len(user_type_distribution)
    self._user_type_dist = user_type_distribution
    if len(user_document_mean_affinity_matrix) != len(user_type_distribution):
      raise ValueError('The dimensions of user_type_distribution and '
                       'user_document_mean_affinity_matrix do not match.')
    if len(user_document_stddev_affinity_matrix) != len(user_type_distribution):
      raise ValueError('The dimensions of user_type_distribution and '
                       'user_document_stddev_affinity_matrix do not match.')
    self._user_doc_means = user_document_mean_affinity_matrix
    self._user_doc_stddev = user_document_stddev_affinity_matrix
    logging.debug('Initialized IEClusterUserSampler')
    super(IEClusterUserSampler, self).__init__(user_ctor, **kwargs) 
Example #9
Source File: common.py    From loaner with Apache License 2.0    6 votes
def _get_config_file_path(config_file_path):
  """Gets the config file path if a full path was not provided.

  Args:
    config_file_path: str, the name or the full path of the config file.

  Returns:
    A str representing the full path to the config file.
  """
  if os.path.isabs(config_file_path):
    return config_file_path
  logging.debug(
      'The full path for the config file was not specified, '
      'looking in the default directory.')
  return os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '..', config_file_path) 
Example #10
Source File: lan_sc2_env.py    From pysc2 with Apache License 2.0    6 votes
def tcp_client(tcp_addr):
  """Connect to the tcp server, and return the settings."""
  family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
  sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
  for i in range(300):
    logging.info("Connecting to: %s, attempt %d", tcp_addr, i)
    try:
      sock.connect(tcp_addr)
      break
    except socket.error:
      time.sleep(1)
  else:
    sock.connect(tcp_addr)  # One last try, but don't catch this error.
  logging.info("Connected.")

  map_data = read_tcp(sock)
  settings_str = read_tcp(sock)
  if not settings_str:
    raise socket.error("Failed to read")
  settings = json.loads(settings_str.decode())
  logging.info("Got settings. map_name: %s.", settings["map_name"])
  logging.debug("settings: %s", settings)
  settings["map_data"] = map_data
  return sock, settings 
Example #11
Source File: storage.py    From loaner with Apache License 2.0    6 votes
def insert_bucket(self, bucket_name=None):
    """Inserts a Google Cloud Storage Bucket object.

    Args:
      bucket_name: str, the name of the Google Cloud Storage Bucket to insert.

    Returns:
      A dictionary object representing a Google Cloud Storage Bucket.
          type: google.cloud.storage.bucket.Bucket

    Raises:
      AlreadyExistsError: when trying to insert a bucket that already exists.
    """
    bucket_name = bucket_name or self._config.bucket
    try:
      new_bucket = self._client.create_bucket(bucket_name)
    except exceptions.Conflict as err:
      raise AlreadyExistsError(
          'the Google Cloud Storage Bucket with name {!r} already exists: '
          '{}'.format(bucket_name, err))

    logging.debug(
        'The Google Cloud Storage Bucket %r has been created for project '
        '%r.', bucket_name, self._config.project)
    return new_bucket 
Example #12
Source File: long_term_satisfaction.py    From recsim with Apache License 2.0    6 votes
def __init__(self,
               user_ctor=LTSUserState,
               memory_discount=0.7,
               sensitivity=0.01,
               innovation_stddev=0.05,
               choc_mean=5.0,
               choc_stddev=1.0,
               kale_mean=4.0,
               kale_stddev=1.0,
               time_budget=60,
               **kwargs):
    """Creates a new user state sampler."""
    logging.debug('Initialized LTSStaticUserSampler')
    self._state_parameters = {'memory_discount': memory_discount,
                              'sensitivity': sensitivity,
                              'innovation_stddev': innovation_stddev,
                              'choc_mean': choc_mean,
                              'choc_stddev': choc_stddev,
                              'kale_mean': kale_mean,
                              'kale_stddev': kale_stddev,
                              'time_budget': time_budget
                             }
    super(LTSStaticUserSampler, self).__init__(user_ctor, **kwargs) 
Example #13
Source File: __init__.py    From abseil-py with Apache License 2.0    6 votes
def value(self, v):
    if v in _CPP_LEVEL_TO_NAMES:
      # --stderrthreshold also accepts numeric strings whose values are
      # Abseil C++ log levels.
      cpp_value = int(v)
      v = _CPP_LEVEL_TO_NAMES[v]  # Normalize to strings.
    elif v.lower() in _CPP_NAME_TO_LEVELS:
      v = v.lower()
      if v == 'warn':
        v = 'warning'  # Use 'warning' as the canonical name.
      cpp_value = int(_CPP_NAME_TO_LEVELS[v])
    else:
      raise ValueError(
          '--stderrthreshold must be one of (case-insensitive) '
          "'debug', 'info', 'warning', 'error', 'fatal', "
          "or '0', '1', '2', '3', not '%s'" % v)

    self._value = v 
Example #14
Source File: __init__.py    From abseil-py with Apache License 2.0    6 votes
def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged,
  and all messages of level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string values
        are those that can be coerced to an integer as well as case-insensitive
        'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    new_level = int(v)
  except ValueError:
    new_level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = new_level 
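As a caller-side reference for the example above, set_verbosity() accepts either an integer or a level name; a brief illustrative sketch (values chosen arbitrarily):

from absl import logging

logging.set_verbosity(logging.DEBUG)  # module-level constant
logging.set_verbosity('info')         # case-insensitive level name
logging.set_verbosity(1)              # integer value (1 corresponds to DEBUG in absl)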
Example #15
Source File: __init__.py    From abseil-py with Apache License 2.0    6 votes
def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid string values are case-insensitive 'debug',
        'info', 'warning', 'error', and 'fatal'; valid integer values are
        logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
      ValueError: Raised when s is an invalid value.
  """
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
  elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
  else:
    raise ValueError(
        'set_stderrthreshold only accepts integer absl logging level '
        'from -3 to 1, or case-insensitive string values '
        "'debug', 'info', 'warning', 'error', and 'fatal'. "
        'But found "{}" ({}).'.format(s, type(s))) 
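Similarly, a short illustrative sketch of calling set_stderrthreshold() with the value forms accepted by the docstring above:

from absl import logging

logging.set_stderrthreshold('warning')  # only WARNING and above are written to stderr
# Passing a level constant also works; note that debug() output additionally
# requires the verbosity to be set to DEBUG before anything is emitted.
logging.set_stderrthreshold(logging.DEBUG)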
Example #16
Source File: interest_evolution.py    From recsim with Apache License 2.0    5 votes
def __init__(self,
               user_ctor=IEvUserState,
               document_quality_factor=1.0,
               no_click_mass=1.0,
               min_normalizer=-1.0,
               **kwargs):
    """Creates a new user state sampler."""
    logging.debug('Initialized UtilityModelUserSampler')
    self._no_click_mass = no_click_mass
    self._min_normalizer = min_normalizer
    self._document_quality_factor = document_quality_factor
    super(UtilityModelUserSampler, self).__init__(user_ctor, **kwargs) 
Example #17
Source File: asr_seq_task_test.py    From delta with Apache License 2.0    5 votes
def test_dataset(self):
    for batch_mode in [True, False]:
      task_name = self.config['data']['task']['name']
      self.config['data']['task']['batch_mode'] = batch_mode
      self.config['data']['task']['dummy'] = False
      task = registers.task[task_name](self.config, self.mode)

      with self.cached_session(use_gpu=False, force_gpu=False):
        for features, labels in task.dataset(
            self.mode, self.batch_size, epoch=1):  # pylint: disable=bad-continuation
          logging.debug("feats : {} : {}".format(features['inputs'],
                                                 features['inputs'].shape))
          logging.debug("ilens : {} : {}".format(
              features['input_length'], features['input_length'].shape))
          logging.debug("targets : {} : {}".format(features['targets'],
                                                   features['targets'].shape))
          logging.debug("olens : {} : {}".format(
              features['target_length'], features['target_length'].shape))
          logging.debug("ctc : {}, shape : {}".format(labels['ctc'],
                                                      labels['ctc'].shape))
          self.assertDTypeEqual(features['inputs'], np.float32)
          self.assertDTypeEqual(features['targets'], np.int32)
          self.assertDTypeEqual(features['input_length'], np.int32)
          self.assertDTypeEqual(features['target_length'], np.int32)

          self.assertEqual(len(features['inputs'].shape), 4)
          self.assertEqual(len(features['input_length'].shape), 1)
          self.assertEqual(len(features['targets'].shape), 2)
          self.assertEqual(len(features['target_length'].shape), 1) 
Example #18
Source File: movielens_recs.py    From ml-fairness-gym with Apache License 2.0    5 votes
def _run_one_parallel_batch(envs, agent, config):
  """Simulate one batch of training interactions in parallel."""
  rewards = [0 for _ in envs]
  observations = [env.reset() for env in envs]
  for _ in range(config.max_episode_length):
    logging.debug('starting agent step')
    slates = agent.step(rewards, observations)
    logging.debug('starting envs step')
    observations, rewards, _, _ = zip(
        *[env.step(slate) for slate, env in zip(slates, envs)])
    logging.debug('done envs step')
    assert (len({obs['user']['user_id'] for obs in observations}) > 1 or
            len(observations) == 1
           ), 'In a parallel batch there should be many different users!'
  agent.end_episode(rewards, observations, eval_mode=True) 
Example #19
Source File: base_solver.py    From delta with Apache License 2.0    5 votes
def clip_gradients(self, grads_and_vars, clip_ratio):
    """Clip the gradients."""
    is_zip_obj = False
    if isinstance(grads_and_vars, zip):
      grads_and_vars = list(grads_and_vars)
      is_zip_obj = True

    with tf.variable_scope('grad'):
      for grad, var in grads_and_vars:
        if grad is not None:
          tf.summary.histogram(var.name[:-2], grad)
        else:
          logging.debug('%s gradient is None', var.name)

    # no clipping requested
    if not clip_ratio:
      if is_zip_obj:
        grads, variables = zip(*grads_and_vars)
        grads_and_vars = zip(grads, variables)
      return grads_and_vars

    gradients, variables = zip(*grads_and_vars)
    clipped, global_norm = tf.clip_by_global_norm(gradients, clip_ratio)
    grad_and_var_clipped = zip(clipped, variables)

    tf.summary.scalar('gradient/global_norm', global_norm)
    return grad_and_var_clipped 
Example #20
Source File: speaker_solver.py    From delta with Apache License 2.0    5 votes
def create_serving_input_receiver_fn(self):
    # with batch_size
    taskconf = self.config['data']['task']
    shape = [None] + taskconf['audio']['feature_shape']
    logging.debug('serving input shape:{}'.format(shape))

    return tf.estimator.export.build_raw_serving_input_receiver_fn(
        features={
            'inputs':
                tf.placeholder(name="inputs", shape=shape, dtype=tf.float32),
        },
        default_batch_size=None,
    ) 
Example #21
Source File: asr_seq_task_test.py    From delta with Apache License 2.0    5 votes
def test_dummy_dataset(self):
    for batch_mode in [True, False]:
      task_name = self.config['data']['task']['name']
      self.config['data']['task']['batch_mode'] = batch_mode
      self.config['data']['task']['dummy'] = True
      task = registers.task[task_name](self.config, self.mode)

      with self.cached_session(use_gpu=False, force_gpu=False):
        for _ in task.dataset(self.mode, self.batch_size, epoch=1):
          break
        for features, labels in task.dataset(
            self.mode, self.batch_size, epoch=1):  # pylint: disable=bad-continuation
          logging.debug("feats : {} : {}".format(features['inputs'],
                                                 features['inputs'].shape))
          logging.debug("ilens : {} : {}".format(
              features['input_length'], features['input_length'].shape))
          logging.debug("targets : {} : {}".format(features['targets'],
                                                   features['targets'].shape))
          logging.debug("olens : {} : {}".format(
              features['target_length'], features['target_length'].shape))
          logging.debug("ctc : {}, shape : {}".format(labels['ctc'],
                                                      labels['ctc'].shape))
          self.assertDTypeEqual(features['inputs'], np.float32)
          self.assertDTypeEqual(features['targets'], np.int32)
          self.assertDTypeEqual(features['input_length'], np.int32)
          self.assertDTypeEqual(features['target_length'], np.int32)

          self.assertEqual(len(features['inputs'].shape), 4)
          self.assertEqual(len(features['input_length'].shape), 1)
          self.assertEqual(len(features['targets'].shape), 2)
          self.assertEqual(len(features['target_length'].shape), 1) 
Example #22
Source File: interest_evolution.py    From recsim with Apache License 2.0    5 votes
def __init__(self, user_ctor=IEvUserState, **kwargs):
    """Creates a new user state sampler."""
    logging.debug('Initialized IEvUserDistributionSampler')
    super(IEvUserDistributionSampler, self).__init__(user_ctor, **kwargs) 
Example #23
Source File: publisher.py    From tfx with Apache License 2.0    5 votes
def publish_execution(
      self,
      component_info: data_types.ComponentInfo,
      output_artifacts: Optional[Dict[Text, List[types.Artifact]]] = None,
      exec_properties: Optional[Dict[Text, Any]] = None):
    """Publishes a component execution to metadata.

    This function will do two things:
    1. update the execution that was previously registered before execution to
       complete or skipped state, depending on whether cached results are used.
    2. for each input and output artifact, publish an event that associates the
       artifact with the execution, with type INPUT or OUTPUT respectively.

    Args:
      component_info: the information of the component
      output_artifacts: optional key -> Artifacts to be published as outputs
        of the execution
      exec_properties: optional execution properties to be published for the
        execution

    Returns:
      A dict containing output artifacts.
    """
    logging.debug('Outputs: %s', output_artifacts)
    logging.debug('Execution properties: %s', exec_properties)

    self._metadata_handler.publish_execution(
        component_info=component_info,
        output_artifacts=output_artifacts,
        exec_properties=exec_properties) 
Example #24
Source File: networks.py    From tensor2robot with Apache License 2.0    5 votes
def add_losses(self,
                 config,
                 logits,
                 end_points,
                 label,
                 loss_type,
                 use_tpu=False):
    """Add the losses to train the model.

    Args:
      config: The slim config deployment used.
      logits: The logits that the model generates.
      end_points: The end points that the model generates.
      label: The labels of the current batch.
      loss_type: The type of loss to use.
      use_tpu: Whether to run on TPU.
    """
    logits = tf.check_numerics(logits, 'Logits is not a number.')
    label = tf.check_numerics(label, 'Label is not a number.')
    if loss_type == 'cross_entropy':
      slim.losses.softmax_cross_entropy(logits, label)
    elif loss_type == 'log':
      slim.losses.log_loss(end_points['predictions'], label)
    elif loss_type == 'huber':
      tf.losses.huber_loss(label, end_points['predictions'])
    else:
      slim.losses.sum_of_squares(end_points['predictions'], label)

    logging.debug('end points predictions %s', str(end_points['predictions']))
    logging.debug('label %s', str(label))
    if not use_tpu:
      with tf.device(config.inputs_device()):
        slim.summaries.add_histogram_summaries(
            list(end_points.values()), 'Predictions')
        slim.summaries.add_zero_fraction_summaries(list(end_points.values()))
        slim.summaries.add_histogram_summary(label, 'Labels')
        slim.summaries.add_histogram_summaries(
            slim.variables.get_model_variables()) 
Example #25
Source File: captain.py    From QAbot_by_base_KG with MIT License    5 votes
def _similarity_distance(s1, s2, ignore):
    '''
    compute similarity with distance measurement
    '''
    g = 0.0
    try:
        g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
        if is_digit(g_): g = g_
    except Exception: pass  # keep g = 0.0 if the vector similarity cannot be computed

    u = _nearby_levenshtein_distance(s1, s2)
    logging.debug("g: %s, u: %s" % (g, u))
    if u >= 0.99:
        r = 1.0
    elif u > 0.9:
        r = _similarity_smooth(g, 0.05, u, 0.05)
    elif u > 0.8:
        r = _similarity_smooth(g, 0.1, u, 0.2)
    elif u > 0.4:
        r = _similarity_smooth(g, 0.2, u, 0.15)
    elif u > 0.2:
        r = _similarity_smooth(g, 0.3, u, 0.1)
    else:
        r = _similarity_smooth(g, 0.4, u, 0)

    if r < 0: r = abs(r)
    r = min(r, 1.0)
    return float("%.3f" % r) 
Example #26
Source File: __init__.py    From QAbot_by_base_KG with MIT License    5 votes
def check_initialized(self):
    # logging.debug("check_initialized: %s" % self.initialized)
    if not self.initialized:
        self.initialize()
Example #27
Source File: train.py    From neural-structured-learning with Apache License 2.0    5 votes
def get_train_op(loss, optimizer, grad_clip=None, global_step=None):
  """Make a train_op apply gradients to loss using optimizer.

  Args:
   loss: the loss function to optimize
   optimizer: the optimizer to compute and apply gradients
   grad_clip: clip gradient norms by the value supplied (default: don't clip)
   global_step: tf.placeholder for global_step

  Returns:
   train_op: the training op to run
   grad_var_norms: the global norms of each variable and its gradient, for debugging
   var_names: the variable names for debugging
   capped_grads_and_vars: for debugging
  """
  variables = tf.trainable_variables()
  grads_and_vars = optimizer.compute_gradients(loss, variables)
  var_names = [v.name for v in variables]
  logging.info("Trainable variables:")
  for var in var_names:
    logging.info("\t %s", var)
  logging.debug(grads_and_vars)
  grad_var_norms = [(tf.global_norm([gv[1]]), tf.global_norm([gv[0]]))
                    for gv in grads_and_vars]

  if grad_clip:
    capped_grads_and_vars = [(tf.clip_by_norm(gv[0], grad_clip), gv[1])
                             for gv in grads_and_vars]
  else:
    capped_grads_and_vars = grads_and_vars
  # norms of gradients for debugging
  # grad_norms = [tf.sqrt(tf.reduce_sum(tf.square(grad)))
  #               for grad, _ in grads_and_vars]
  train_op = optimizer.apply_gradients(capped_grads_and_vars,
                                       global_step=global_step)
  return train_op, grad_var_norms, var_names, capped_grads_and_vars 
Example #28
Source File: threshold_policies.py    From ml-fairness-gym with Apache License 2.0    5 votes
def convex_hull_roc(roc):
  """Returns an roc curve without the points inside the convex hull.

  Points below the fpr=tpr line corresponding to random performance are also
  removed.

  Args:
    roc: A tuple of lists that are all the same length, containing
      (false_positive_rates, true_positive_rates, thresholds). This is the same
      format returned by sklearn.metrics.roc_curve.
  """
  fprs, tprs, thresholds = roc
  if np.isnan(fprs).any() or np.isnan(tprs).any():
    logging.debug("Convex hull solver does not handle NaNs.")
    return roc
  if len(fprs) < 3:
    logging.debug("Convex hull solver does not curves with < 3 points.")
    return roc
  try:
    # Add (fpr=1, tpr=0) to the convex hull to remove any points below the
    # random-performance line.
    hull = scipy.spatial.ConvexHull(np.vstack([fprs + [1], tprs + [0]]).T)
  except scipy.spatial.qhull.QhullError:
    logging.debug("Convex hull solver failed.")
    return roc
  vertices = set(hull.vertices)

  return (
      [fpr for idx, fpr in enumerate(fprs) if idx in vertices],
      [tpr for idx, tpr in enumerate(tprs) if idx in vertices],
      [thresh for idx, thresh in enumerate(thresholds) if idx in vertices],
  ) 
Example #29
Source File: deploy_impl.py    From loaner with Apache License 2.0    5 votes
def _MoveWebAppFrontendBundle(self):
    """Prepare frontend bundle destination and move the build there."""
    if os.path.isdir(self.frontend_bundle_path):
      logging.info(
          'The bundled frontend exists, we are replacing it with a new build.')
      shutil.rmtree(self.frontend_bundle_path)
    logging.debug('Moving the frontend bundle into the web app bundle.')
    shutil.move(
        os.path.join(self.frontend_src_path, 'dist'), self.frontend_bundle_path) 
Example #30
Source File: distributions.py    From ml-fairness-gym with Apache License 2.0    5 votes
def sample(self, rng):
    logging.debug("Sampling from a mixture with %d components. Weights: %s",
                  len(self.components), self.weights)
    component = rng.choice(self.components, p=self.weights)
    return component.sample(rng)