Python absl.flags.DEFINE_boolean() Examples

The following are 30 code examples of absl.flags.DEFINE_boolean(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module absl.flags, or try the search function.
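Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the script and flag name are made up) of the typical DEFINE_boolean pattern: define the flag at module level, read it from flags.FLAGS inside main(), and let absl parse sys.argv via app.run().

from absl import app
from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_boolean('dry_run', False, 'Print what would be done, then exit.')


def main(argv):
  del argv  # Unused.
  if FLAGS.dry_run:
    print('Dry run: skipping work.')
  else:
    print('Doing work.')


if __name__ == '__main__':
  app.run(main)

On the command line, a flag defined this way accepts --dry_run, --nodry_run, or an explicit --dry_run=true/false.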
Example #1
Source File: flags.py    From benchmarks with Apache License 2.0 (7 votes)
def define_flags(specs=None):
  """Define a command line flag for each ParamSpec in flags.param_specs."""
  specs = specs or param_specs
  define_flag = {
      'boolean': absl_flags.DEFINE_boolean,
      'float': absl_flags.DEFINE_float,
      'integer': absl_flags.DEFINE_integer,
      'string': absl_flags.DEFINE_string,
      'enum': absl_flags.DEFINE_enum,
      'list': absl_flags.DEFINE_list
  }
  for name, param_spec in six.iteritems(specs):
    if param_spec.flag_type not in define_flag:
      raise ValueError('Unknown flag_type %s' % param_spec.flag_type)
    else:
      define_flag[param_spec.flag_type](name, param_spec.default_value,
                                        help=param_spec.description,
                                        **param_spec.kwargs) 
Example #2
Source File: lamb_flags.py    From lamb with Apache License 2.0 (6 votes)
def _define_flags(options):
  for option in _filter_options(options):
    name, type_, default_ = option[:3]
    if type_ == 'boolean':
      flags.DEFINE_boolean(name, default_, '')
    elif type_ == 'integer':
      flags.DEFINE_integer(name, default_, '')
    elif type_ == 'float':
      flags.DEFINE_float(name, default_, '')
    elif type_ == 'string':
      flags.DEFINE_string(name, default_, '')
    elif type_ == 'list_of_ints':
      flags.DEFINE_string(name, default_, '')
    else:
      assert False, 'Unexpected option type %s' % type_


# Define command-line flags for all options (unless `internal`). 
Example #3
Source File: wide_deep_run_loop.py    From multilabel-image-classification-tensorflow with MIT License (6 votes)
def define_wide_deep_flags():
  """Add supervised learning flags, as well as wide-deep model type."""
  flags_core.define_base()
  flags_core.define_benchmark()
  flags_core.define_performance(
      num_parallel_calls=False, inter_op=True, intra_op=True,
      synthetic_data=False, max_train_steps=False, dtype=False,
      all_reduce_alg=False)

  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name="model_type", short_name="mt", default="wide_deep",
      enum_values=['wide', 'deep', 'wide_deep'],
      help="Select model topology.")
  flags.DEFINE_boolean(
      name="download_if_missing", default=True, help=flags_core.help_wrap(
          "Download data to data_dir if it is not already present.")) 
Example #4
Source File: wide_deep_run_loop.py    From g-tensorflow-models with Apache License 2.0 (6 votes)
def define_wide_deep_flags():
  """Add supervised learning flags, as well as wide-deep model type."""
  flags_core.define_base()
  flags_core.define_benchmark()
  flags_core.define_performance(
      num_parallel_calls=False, inter_op=True, intra_op=True,
      synthetic_data=False, max_train_steps=False, dtype=False,
      all_reduce_alg=False)

  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name="model_type", short_name="mt", default="wide_deep",
      enum_values=['wide', 'deep', 'wide_deep'],
      help="Select model topology.")
  flags.DEFINE_boolean(
      name="download_if_missing", default=True, help=flags_core.help_wrap(
          "Download data to data_dir if it is not already present.")) 
Example #5
Source File: flags.py    From dlcookbook-dlbs with Apache License 2.0 (6 votes)
def define_flags():
  """Define a command line flag for each ParamSpec in flags.param_specs."""
  define_flag = {
      'boolean': absl_flags.DEFINE_boolean,
      'float': absl_flags.DEFINE_float,
      'integer': absl_flags.DEFINE_integer,
      'string': absl_flags.DEFINE_string,
      'enum': absl_flags.DEFINE_enum,
      'list': absl_flags.DEFINE_list
  }
  for name, param_spec in six.iteritems(param_specs):
    if param_spec.flag_type not in define_flag:
      raise ValueError('Unknown flag_type %s' % param_spec.flag_type)
    else:
      define_flag[param_spec.flag_type](name, param_spec.default_value,
                                        help=param_spec.description,
                                        **param_spec.kwargs) 
Example #6
Source File: wide_deep_run_loop.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 (6 votes)
def define_wide_deep_flags():
  """Add supervised learning flags, as well as wide-deep model type."""
  flags_core.define_base(clean=True, train_epochs=True,
                         epochs_between_evals=True)
  flags_core.define_benchmark()
  flags_core.define_performance(
      num_parallel_calls=False, inter_op=True, intra_op=True,
      synthetic_data=False, max_train_steps=False, dtype=False,
      all_reduce_alg=False)

  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name="model_type", short_name="mt", default="wide_deep",
      enum_values=['wide', 'deep', 'wide_deep'],
      help="Select model topology.")
  flags.DEFINE_boolean(
      name="download_if_missing", default=True, help=flags_core.help_wrap(
          "Download data to data_dir if it is not already present.")) 
Example #7
Source File: module_foo.py    From abseil-py with Apache License 2.0 (6 votes)
def duplicate_flags(flagnames=None):
  """Returns a new FlagValues object with the requested flagnames.

  Used to test DuplicateFlagError detection.

  Args:
    flagnames: [str], a list of flag names to create.

  Returns:
    A FlagValues object with one boolean flag for each name in flagnames.
  """
  flag_values = flags.FlagValues()
  for name in flagnames:
    flags.DEFINE_boolean(name, False, 'Flag named %s' % (name,),
                         flag_values=flag_values)
  return flag_values 
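A hedged usage sketch (not part of the abseil-py tests themselves): defining the same flag name twice in one FlagValues raises flags.DuplicateFlagError, which is the situation this helper is meant to set up.

from absl import flags

try:
  duplicate_flags(['repeated_name', 'repeated_name'])
except flags.DuplicateFlagError as e:
  print('Caught expected error:', e)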
Example #8
Source File: module_bar.py    From abseil-py with Apache License 2.0 (6 votes)
def define_flags(flag_values=FLAGS):
  """Defines some flags.

  Args:
    flag_values: The FlagValues object we want to register the flags
      with.
  """
  # The 'tmod_bar_' prefix (short for 'test_module_bar') ensures there
  # is no name clash with the existing flags.
  flags.DEFINE_boolean('tmod_bar_x', True, 'Boolean flag.',
                       flag_values=flag_values)
  flags.DEFINE_string('tmod_bar_y', 'default', 'String flag.',
                      flag_values=flag_values)
  flags.DEFINE_boolean('tmod_bar_z', False,
                       'Another boolean flag from module bar.',
                       flag_values=flag_values)
  flags.DEFINE_integer('tmod_bar_t', 4, 'Sample int flag.',
                       flag_values=flag_values)
  flags.DEFINE_integer('tmod_bar_u', 5, 'Sample int flag.',
                       flag_values=flag_values)
  flags.DEFINE_integer('tmod_bar_v', 6, 'Sample int flag.',
                       flag_values=flag_values) 
Example #9
Source File: shakespeare_main.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 (5 votes)
def define_flags():
  """Define the flags for the Shakespeare character LSTM."""
  flags_core.define_base(data_dir=False,
                         clean=False,
                         train_epochs=True,
                         epochs_between_evals=False,
                         stop_threshold=False,
                         hooks=False,
                         export_dir=False,
                         run_eagerly=True)

  flags_core.define_performance(num_parallel_calls=False,
                                inter_op=False,
                                intra_op=False,
                                synthetic_data=False,
                                max_train_steps=False,
                                dtype=False,
                                enable_xla=True,
                                force_v2_in_keras_compile=True)

  flags_core.set_defaults(train_epochs=43,
                          batch_size=64)

  flags.DEFINE_boolean(name='enable_eager', default=True, help='Enable eager?')
  flags.DEFINE_boolean(
      name='train', default=True,
      help='If true trains the model.')
  flags.DEFINE_string(
      name='predict_context', default=None,
      help='If set, makes a prediction with the given context.')
  flags.DEFINE_integer(
      name='predict_length', default=1000,
      help='Length of the predicted text including the context.')
  flags.DEFINE_integer(
      name='log_steps', default=100,
      help='For every log_steps, we log the timing information such as '
      'examples per second.')
  flags.DEFINE_string(
      name='training_data', default=None,
      help='Path to file containing the training data.')
  flags.DEFINE_boolean(name='cudnn', default=True, help='Use CuDNN LSTM.') 
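A minimal sketch (assumed, not part of shakespeare_main.py) of how the boolean flags above parse: absl accepts both the --<name>/--no<name> forms and an explicit --<name>=true/false, shown here against a private FlagValues instead of the global FLAGS.

from absl import flags

fv = flags.FlagValues()
flags.DEFINE_boolean('cudnn', True, 'Use CuDNN LSTM.', flag_values=fv)
flags.DEFINE_boolean('train', True, 'If true trains the model.', flag_values=fv)

fv(['shakespeare_main.py', '--nocudnn', '--train=false'])
assert fv.cudnn is False and fv.train is False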
Example #10
Source File: keras_common.py    From g-tensorflow-models with Apache License 2.0 (5 votes)
def define_keras_flags():
  flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
  flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
  flags.DEFINE_integer(
      name='train_steps', default=None,
      help='The number of steps to run for training. If it is larger than '
      '# batches per epoch, then use # batches per epoch. When this flag is '
      'set, only one epoch is going to run for training.')
  flags.DEFINE_integer(
      name='log_steps', default=100,
      help='For every log_steps, we log the timing information such as '
      'examples per second. Besides, for every log_steps, we store the '
      'timestamp of a batch end.') 
Example #11
Source File: app.py    From clgen with GNU General Public License v3.0 (5 votes)
def DEFINE_boolean(
  name: str,
  default: Optional[bool],
  help: str,
  required: bool = False,
  validator: Callable[[bool], bool] = None,
):
  """Registers a flag whose value must be a boolean."""
  absl_flags.DEFINE_boolean(
    name, default, help, module_name=get_calling_module_name(),
  )
  if required:
    absl_flags.mark_flag_as_required(name)
  if validator:
    RegisterFlagValidator(name, validator) 
Example #12
Source File: flags.py    From dlcookbook-dlbs with Apache License 2.0 (5 votes)
def DEFINE_boolean(name, default, help):  # pylint: disable=invalid-name,redefined-builtin
  param_specs[name] = ParamSpec('boolean', default, help, {}) 
Example #13
Source File: ctl_common.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 (5 votes)
def define_ctl_flags():
  """Define flags for CTL."""

  flags.DEFINE_boolean(name='use_tf_function', default=True,
                       help='Wrap the train and test step inside a '
                       'tf.function.')
  flags.DEFINE_boolean(name='single_l2_loss_op', default=False,
                       help='Calculate L2_loss on concatenated weights, '
                       'instead of using Keras per-layer L2 loss.') 
Example #14
Source File: flags.py    From benchmarks with Apache License 2.0 (5 votes)
def DEFINE_boolean(name, default, help):  # pylint: disable=invalid-name,redefined-builtin
  param_specs[name] = ParamSpec('boolean', default, help, {}) 
Example #15
Source File: flags_test.py    From abseil-py with Apache License 2.0 (5 votes)
def test_find_module_defining_flag_passing_module_name(self):
    my_flags = flags.FlagValues()
    module_name = sys.__name__  # Must use an existing module.
    flags.DEFINE_boolean('flag_name', True,
                         'Flag with a different module name.',
                         flag_values=my_flags,
                         module_name=module_name)
    self.assertEqual(module_name,
                     my_flags.find_module_defining_flag('flag_name')) 
Example #16
Source File: flags_test.py    From abseil-py with Apache License 2.0 (5 votes)
def setUp(self):
    self.flag_values = flags.FlagValues()
    flags.DEFINE_string('unittest_message1', 'Foo!', 'You Add Here.',
                        flag_values=self.flag_values)
    flags.DEFINE_string('unittest_message2', 'Bar!', 'Hello, Sailor!',
                        flag_values=self.flag_values)
    flags.DEFINE_boolean('unittest_boolflag', 0, 'Some Boolean thing',
                         flag_values=self.flag_values)
    flags.DEFINE_integer('unittest_number', 12345, 'Some integer',
                         lower_bound=0, flag_values=self.flag_values)
    flags.DEFINE_list('UnitTestList', '1,2,3', 'Some list',
                      flag_values=self.flag_values)
    self.tmp_path = None
    self.flag_values.mark_as_parsed() 
Example #17
Source File: module_foo.py    From abseil-py with Apache License 2.0 (5 votes)
def define_flags(flag_values=FLAGS):
  """Defines a few flags."""
  module_bar.define_flags(flag_values=flag_values)
  # The 'tmod_foo_' prefix (short for 'test_module_foo') ensures that we
  # have no name clash with existing flags.
  flags.DEFINE_boolean('tmod_foo_bool', True, 'Boolean flag from module foo.',
                       flag_values=flag_values)
  flags.DEFINE_string('tmod_foo_str', 'default', 'String flag.',
                      flag_values=flag_values)
  flags.DEFINE_integer('tmod_foo_int', 3, 'Sample int flag.',
                       flag_values=flag_values) 
Example #18
Source File: flags_test.py    From abseil-py with Apache License 2.0 (5 votes)
def test_find_module_id_defining_flag_passing_module_name(self):
    my_flags = flags.FlagValues()
    module_name = sys.__name__  # Must use an existing module.
    flags.DEFINE_boolean('flag_name', True,
                         'Flag with a different module name.',
                         flag_values=my_flags,
                         module_name=module_name)
    self.assertEqual(id(sys),
                     my_flags.find_module_id_defining_flag('flag_name')) 
Example #19
Source File: keras_common.py    From ml-on-gcp with Apache License 2.0 (5 votes)
def define_keras_flags():
  flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
  flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
  flags.DEFINE_integer(
      name='train_steps', default=None,
      help='The number of steps to run for training. If it is larger than '
      '# batches per epoch, then use # batches per epoch. When this flag is '
      'set, only one epoch is going to run for training.')
  flags.DEFINE_integer(
      name='log_steps', default=100,
      help='For every log_steps, we log the timing information such as '
      'examples per second. Besides, for every log_steps, we store the '
      'timestamp of a batch end.') 
Example #20
Source File: benchmark_cnn.py    From parallax with Apache License 2.0 (5 votes)
def define_flags():
    """Define a command line flag for each ParamSpec in _DEFAULT_PARAMS."""
    define_flag = {
        'boolean': flags.DEFINE_boolean,
        'float': flags.DEFINE_float,
        'integer': flags.DEFINE_integer,
        'string': flags.DEFINE_string,
    }
    for name, param_spec in six.iteritems(_DEFAULT_PARAMS):
        if param_spec.flag_type not in define_flag:
            raise ValueError('Unknown flag_type %s' % param_spec.flag_type)
        else:
            define_flag[param_spec.flag_type](name, param_spec.default_value,
                                              param_spec.description)
            flags.declare_key_flag(name) 
Example #21
Source File: keras_common.py    From ml-on-gcp with Apache License 2.0 (5 votes)
def define_keras_flags():
  flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
  flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
  flags.DEFINE_integer(
      name='train_steps', default=None,
      help='The number of steps to run for training. If it is larger than '
      '# batches per epoch, then use # batches per epoch. When this flag is '
      'set, only one epoch is going to run for training.')
  flags.DEFINE_integer(
      name='log_steps', default=100,
      help='For every log_steps, we log the timing information such as '
      'examples per second. Besides, for every log_steps, we store the '
      'timestamp of a batch end.') 
Example #22
Source File: resnet_run_loop.py    From nsfw with Apache License 2.0 (5 votes)
def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None initialize all the network except the final layer with '
          'these values'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))

  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs) 
Example #23
Source File: benchmark_main.py    From multilabel-image-classification-tensorflow with MIT License (4 votes)
def define_keras_benchmark_flags():
  """Add flags for keras built-in application models."""
  flags_core.define_base(hooks=False)
  flags_core.define_performance()
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags_core.set_defaults(
      data_format="channels_last",
      use_synthetic_data=True,
      batch_size=32,
      train_epochs=2)

  flags.DEFINE_enum(
      name="model", default=None,
      enum_values=MODELS.keys(), case_sensitive=False,
      help=flags_core.help_wrap(
          "Model to be benchmarked."))

  flags.DEFINE_integer(
      name="num_train_images", default=1000,
      help=flags_core.help_wrap(
          "The number of synthetic images for training. The default value is "
          "1000."))

  flags.DEFINE_integer(
      name="num_eval_images", default=50,
      help=flags_core.help_wrap(
          "The number of synthetic images for evaluation. The default value is "
          "50."))

  flags.DEFINE_boolean(
      name="eager", default=False, help=flags_core.help_wrap(
          "To enable eager execution. Note that if eager execution is enabled, "
          "only one GPU is utilized even if multiple GPUs are provided and "
          "multi_gpu_model is used."))

  flags.DEFINE_boolean(
      name="dist_strat", default=False, help=flags_core.help_wrap(
          "To enable distribution strategy for model training and evaluation. "
          "Number of GPUs used for distribution strategy can be set by the "
          "argument --num_gpus."))

  flags.DEFINE_list(
      name="callbacks",
      default=["ExamplesPerSecondCallback", "LoggingMetricCallback"],
      help=flags_core.help_wrap(
          "A list of (case insensitive) strings to specify the names of "
          "callbacks. For example: `--callbacks ExamplesPerSecondCallback,"
          "LoggingMetricCallback`"))

  @flags.multi_flags_validator(
      ["eager", "dist_strat"],
      message="Both --eager and --dist_strat were set. Only one can be "
              "defined, as DistributionStrategy is not supported in Eager "
              "execution currently.")
  # pylint: disable=unused-variable
  def _check_eager_dist_strat(flag_dict):
    return not (flag_dict["eager"] and flag_dict["dist_strat"])
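A hedged sketch (not part of the benchmark code) of how the validator above behaves once flags are parsed: if both booleans end up true, absl's flag validation fails, which it reports as flags.IllegalFlagValueError. The sketch uses register_multi_flags_validator on a private FlagValues so it stays self-contained.

from absl import flags

fv = flags.FlagValues()
flags.DEFINE_boolean('eager', False, 'Enable eager execution.', flag_values=fv)
flags.DEFINE_boolean('dist_strat', False, 'Enable DistributionStrategy.',
                     flag_values=fv)
flags.register_multi_flags_validator(
    ['eager', 'dist_strat'],
    lambda flag_dict: not (flag_dict['eager'] and flag_dict['dist_strat']),
    message='Only one of --eager and --dist_strat may be set.',
    flag_values=fv)

try:
  fv(['prog', '--eager', '--dist_strat'])
except flags.IllegalFlagValueError as e:
  print('Validation failed as expected:', e)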
Example #24
Source File: resnet_run_loop.py    From multilabel-image-classification-tensorflow with MIT License (4 votes)
def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                datasets_num_parallel_batches=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None initialize all the network except the final layer with '
          'these values'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))
  flags.DEFINE_boolean(
      name='image_bytes_as_serving_input', default=False,
      help=flags_core.help_wrap(
          'If True exports savedmodel with serving signature that accepts '
          'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
          'represents the image. The former is easier to use for serving at '
          'the expense of image resize/cropping being done as part of model '
          'inference. Note, this flag only applies to ImageNet and cannot '
          'be used for CIFAR.'))

  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs) 
Example #25
Source File: data_async_generation.py    From multilabel-image-classification-tensorflow with MIT License (4 votes)
def define_flags():
  """Construct flags for the server."""
  flags.DEFINE_integer(name="num_workers", default=multiprocessing.cpu_count(),
                       help="Size of the negative generation worker pool.")
  flags.DEFINE_string(name="data_dir", default=None,
                      help="The data root. (used to construct cache paths.)")
  flags.DEFINE_string(name="cache_id", default=None,
                      help="The cache_id generated in the main process.")
  flags.DEFINE_integer(name="num_readers", default=4,
                       help="Number of reader datasets in training. This sets"
                            "how the epoch files are sharded.")
  flags.DEFINE_integer(name="num_neg", default=None,
                       help="The Number of negative instances to pair with a "
                            "positive instance.")
  flags.DEFINE_integer(name="num_train_positives", default=None,
                       help="The number of positive training examples.")
  flags.DEFINE_integer(name="num_items", default=None,
                       help="Number of items from which to select negatives.")
  flags.DEFINE_integer(name="num_users", default=None,
                       help="The number of unique users. Used for evaluation.")
  flags.DEFINE_integer(name="epochs_per_cycle", default=1,
                       help="The number of epochs of training data to produce"
                            "at a time.")
  flags.DEFINE_integer(name="num_cycles", default=None,
                       help="The number of cycles to produce training data "
                            "for.")
  flags.DEFINE_integer(name="train_batch_size", default=None,
                       help="The batch size with which training TFRecords will "
                            "be chunked.")
  flags.DEFINE_integer(name="eval_batch_size", default=None,
                       help="The batch size with which evaluation TFRecords "
                            "will be chunked.")
  flags.DEFINE_boolean(name="redirect_logs", default=False,
                       help="Catch logs and write them to a file. "
                            "(Useful if this is run as a subprocess)")
  flags.DEFINE_boolean(name="use_tf_logging", default=False,
                       help="Use tf.logging instead of log file.")
  flags.DEFINE_integer(name="seed", default=None,
                       help="NumPy random seed to set at startup. If not "
                            "specified, a seed will not be set.")
  flags.DEFINE_boolean(name="ml_perf", default=None,
                       help="Match MLPerf. See ncf_main.py for details.")
  flags.DEFINE_bool(name="output_ml_perf_compliance_logging", default=None,
                    help="Output the MLPerf compliance logging. See "
                         "ncf_main.py for details.")

  flags.mark_flags_as_required(["data_dir", "cache_id"]) 
Example #26
Source File: aclgen.py    From capirca with Apache License 2.0 (4 votes)
def SetupFlags():
  flags.DEFINE_string(
      'base_directory',
      './policies',
      'The base directory to look for acls; '
      'typically where you\'d find ./corp and ./prod')
  flags.DEFINE_string(
      'definitions_directory',
      './def',
      'Directory where the definitions can be found.')
  flags.DEFINE_string(
      'policy_file',
      None,
      'Individual policy file to generate.')
  flags.DEFINE_string(
      'output_directory',
      './',
      'Directory to output the rendered acls.')
  flags.DEFINE_boolean(
      'optimize',
      False,
      'Turn on optimization.',
      short_name='o')
  flags.DEFINE_boolean(
      'recursive',
      True,
      'Descend recursively from the base directory rendering acls')
  flags.DEFINE_boolean(
      'debug',
      False,
      'Debug messages')
  flags.DEFINE_boolean(
      'verbose',
      False,
      'Verbose messages')
  flags.DEFINE_list(
      'ignore_directories',
      'DEPRECATED, def',
      'Don\'t descend into directories that look like this string')
  flags.DEFINE_integer(
      'max_renderers',
      10,
      'Max number of rendering processes to use.')
  flags.DEFINE_boolean(
      'shade_check',
      False,
      'Raise an error when a term is completely shaded by a prior term.')
  flags.DEFINE_integer(
      'exp_info',
      2,
      'Print a info message when a term is set to expire in that many weeks.') 
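A minimal sketch (not from capirca) of how the optimize flag defined above parses: because it was given short_name='o', both the short and long spellings work, and as a boolean it also gets the automatic --nooptimize form.

from absl import flags

fv = flags.FlagValues()
flags.DEFINE_boolean('optimize', False, 'Turn on optimization.',
                     short_name='o', flag_values=fv)

fv(['aclgen', '-o'])            # same as --optimize or --o
assert fv.optimize is True

fv.unparse_flags()              # reset so a second argv can be parsed
fv(['aclgen', '--nooptimize'])
assert fv.optimize is False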
Example #27
Source File: shakespeare_main.py    From models with Apache License 2.0 (4 votes)
def define_flags():
  """Define the flags for the Shakespeare character LSTM."""
  flags_core.define_base(data_dir=False,
                         clean=False,
                         train_epochs=True,
                         epochs_between_evals=False,
                         stop_threshold=False,
                         num_gpu=True,
                         export_dir=False,
                         run_eagerly=True,
                         distribution_strategy=True)

  flags_core.define_performance(num_parallel_calls=False,
                                inter_op=False,
                                intra_op=False,
                                synthetic_data=False,
                                max_train_steps=False,
                                dtype=True,
                                loss_scale=True,
                                enable_xla=True)

  flags_core.set_defaults(train_epochs=43,
                          batch_size=64)

  flags.DEFINE_boolean(name='enable_eager', default=True, help='Enable eager?')
  flags.DEFINE_boolean(
      name='train', default=True,
      help='If true trains the model.')
  flags.DEFINE_string(
      name='predict_context', default=None,
      help='If set, makes a prediction with the given context.')
  flags.DEFINE_integer(
      name='predict_length', default=1000,
      help='Length of the predicted text including the context.')
  flags.DEFINE_integer(name='train_steps', default=None,
                       help='Overrides train_steps per epoch if not None.')
  flags.DEFINE_integer(
      name='log_steps', default=100,
      help='For every log_steps, we log the timing information such as '
      'examples per second.')
  flags.DEFINE_string(
      name='training_data', default=None,
      help='Path to file containing the training data.')
  flags.DEFINE_boolean(name='cudnn', default=True, help='Use CuDNN LSTM.') 
Example #28
Source File: benchmark_main.py    From g-tensorflow-models with Apache License 2.0 (4 votes)
def define_keras_benchmark_flags():
  """Add flags for keras built-in application models."""
  flags_core.define_base(hooks=False)
  flags_core.define_performance()
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags_core.set_defaults(
      data_format="channels_last",
      use_synthetic_data=True,
      batch_size=32,
      train_epochs=2)

  flags.DEFINE_enum(
      name="model", default=None,
      enum_values=MODELS.keys(), case_sensitive=False,
      help=flags_core.help_wrap(
          "Model to be benchmarked."))

  flags.DEFINE_integer(
      name="num_train_images", default=1000,
      help=flags_core.help_wrap(
          "The number of synthetic images for training. The default value is "
          "1000."))

  flags.DEFINE_integer(
      name="num_eval_images", default=50,
      help=flags_core.help_wrap(
          "The number of synthetic images for evaluation. The default value is "
          "50."))

  flags.DEFINE_boolean(
      name="eager", default=False, help=flags_core.help_wrap(
          "To enable eager execution. Note that if eager execution is enabled, "
          "only one GPU is utilized even if multiple GPUs are provided and "
          "multi_gpu_model is used."))

  flags.DEFINE_boolean(
      name="dist_strat", default=False, help=flags_core.help_wrap(
          "To enable distribution strategy for model training and evaluation. "
          "Number of GPUs used for distribution strategy can be set by the "
          "argument --num_gpus."))

  flags.DEFINE_list(
      name="callbacks",
      default=["ExamplesPerSecondCallback", "LoggingMetricCallback"],
      help=flags_core.help_wrap(
          "A list of (case insensitive) strings to specify the names of "
          "callbacks. For example: `--callbacks ExamplesPerSecondCallback,"
          "LoggingMetricCallback`"))

  @flags.multi_flags_validator(
      ["eager", "dist_strat"],
      message="Both --eager and --dist_strat were set. Only one can be "
              "defined, as DistributionStrategy is not supported in Eager "
              "execution currently.")
  # pylint: disable=unused-variable
  def _check_eager_dist_strat(flag_dict):
    return not (flag_dict["eager"] and flag_dict["dist_strat"])
Example #29
Source File: resnet_run_loop.py    From g-tensorflow-models with Apache License 2.0 (4 votes)
def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                datasets_num_parallel_batches=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None initialize all the network except the final layer with '
          'these values'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))
  flags.DEFINE_boolean(
      name='image_bytes_as_serving_input', default=False,
      help=flags_core.help_wrap(
          'If True exports savedmodel with serving signature that accepts '
          'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
          'represents the image. The former is easier to use for serving at '
          'the expense of image resize/cropping being done as part of model '
          'inference. Note, this flag only applies to ImageNet and cannot '
          'be used for CIFAR.'))
  flags.DEFINE_boolean(
      name='turn_off_distribution_strategy', default=False,
      help=flags_core.help_wrap('Set to True to not use distribution '
                                'strategies.'))
  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs) 
Example #30
Source File: resnet_save.py    From inference with Apache License 2.0 (4 votes)
def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                datasets_num_parallel_batches=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None initialize all the network except the final layer with '
          'these values'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))
  flags.DEFINE_boolean(
      name='image_bytes_as_serving_input', default=False,
      help=flags_core.help_wrap(
          'If True exports savedmodel with serving signature that accepts '
          'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
          'represents the image. The former is easier to use for serving at '
          'the expense of image resize/cropping being done as part of model '
          'inference. Note, this flag only applies to ImageNet and cannot '
          'be used for CIFAR.'))

  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)