Python absl.flags.mark_flags_as_required() Examples
The following are 15 code examples of absl.flags.mark_flags_as_required().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module absl.flags, or try the search function.
Example #1
Source File: cache_tasks_main.py From text-to-text-transfer-transformer with Apache License 2.0 | 6 votes |
def main(_):
  """Caches T5 tasks to disk by running a Beam pipeline.

  Requires --output_cache_dir; writes a COMPLETED marker file into each
  output directory once the pipeline finishes.
  """
  flags.mark_flags_as_required(["output_cache_dir"])
  _import_modules(FLAGS.module_import)

  t5.data.add_global_cache_dirs(
      [FLAGS.output_cache_dir] + FLAGS.tasks_additional_cache_dirs)

  output_dirs = []
  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      FLAGS.pipeline_options)
  with beam.Pipeline(options=pipeline_options) as pipeline:
    tf.io.gfile.makedirs(FLAGS.output_cache_dir)
    output_dirs = run_pipeline(
        pipeline,
        FLAGS.tasks,
        FLAGS.output_cache_dir,
        FLAGS.max_input_examples,
        FLAGS.excluded_tasks,
        FLAGS.module_import,
        FLAGS.overwrite)

  # TODO(adarob): Figure out a way to write these when each task completes.
  for output_dir in output_dirs:
    with tf.io.gfile.GFile(os.path.join(output_dir, "COMPLETED"), "w") as f:
      f.write("")
Example #2
Source File: onsets_frames_transcription_create_tfrecords.py From magenta with Apache License 2.0 | 5 votes |
def main(argv):
  """Creates per-split TFRecords of examples from a CSV manifest.

  Reads the --csv manifest, groups (midi, wav) path pairs by split, verifies
  the splits match --expected_splits, then writes one sharded TFRecord file
  per split via a Beam pipeline.
  """
  del argv
  flags.mark_flags_as_required(['csv', 'output_directory'])

  tf.io.gfile.makedirs(FLAGS.output_directory)

  # Group (midi_path, audio_path) pairs by the CSV's 'split' column.
  splits = collections.defaultdict(list)
  with tf.io.gfile.GFile(FLAGS.csv) as f:
    for row in csv.DictReader(f):
      midi_path = os.path.join(FLAGS.midi_dir, row['midi_filename'])
      audio_path = os.path.join(FLAGS.wav_dir, row['audio_filename'])
      splits[row['split']].append((midi_path, audio_path))

  if sorted(splits.keys()) != sorted(FLAGS.expected_splits.split(',')):
    raise ValueError('Got unexpected set of splits: %s' % list(splits.keys()))

  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      FLAGS.pipeline_options)
  with beam.Pipeline(options=pipeline_options) as p:
    for split in splits:
      split_p = p | 'prepare_split_%s' % split >> beam.Create(splits[split])
      split_p |= 'create_examples_%s' % split >> beam.ParDo(
          CreateExampleDoFn(FLAGS.wav_dir, FLAGS.midi_dir, FLAGS.add_wav_glob))
      split_p |= 'write_%s' % split >> beam.io.WriteToTFRecord(
          os.path.join(FLAGS.output_directory, '%s.tfrecord' % split),
          coder=beam.coders.ProtoCoder(tf.train.Example),
          num_shards=FLAGS.num_shards)
Example #3
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #4
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #5
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #6
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #7
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #8
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #9
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #10
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #11
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #12
Source File: run_saved_model.py From delta with Apache License 2.0 | 5 votes |
def define_flags():
  """Registers the command-line flags used by the evaluator."""
  # GPU devices visible to the current process.
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # Abort at parse time unless these are supplied on the command line:
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
Example #13
Source File: dump_task.py From text-to-text-transfer-transformer with Apache License 2.0 | 5 votes |
def main(_):
  """Prints formatted examples from a T5 task, one per line.

  Requires --task; optionally tokenizes examples and stops after
  --max_examples have been printed.
  """
  flags.mark_flags_as_required(["task"])

  if FLAGS.module_import:
    import_modules(FLAGS.module_import)

  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)

  total_examples = 0
  tf.enable_eager_execution()
  task = t5.data.TaskRegistry.get(FLAGS.task)
  files = task.tfds_dataset.files(FLAGS.split)

  def _example_to_string(ex):
    # Render one example dict using --format_string; missing keys become "".
    key_to_string = {}
    for k in ("inputs", "targets"):
      if k in ex:
        v = ex[k].numpy()
        key_to_string[k] = (
            " ".join(str(i) for i in v)
            if FLAGS.tokenize else v.decode("utf-8"))
      else:
        # Bug fix: the original wrote `v[k] = ""`, which references `v`
        # before assignment on this branch (UnboundLocalError) and never
        # populates the output mapping, breaking format_string.format().
        key_to_string[k] = ""
    return FLAGS.format_string.format(**key_to_string)

  for shard_path in files:
    logging.info("Processing shard: %s", shard_path)
    ds = task.tfds_dataset.load_shard(shard_path)
    ds = task.preprocess_text(ds)
    if FLAGS.tokenize:
      ds = t5.data.encode_string_features(
          ds, task.output_features, keys=task.output_features,
          copy_plaintext=True)
      ds = task.preprocess_tokens(ds, sequence_length())
    for ex in ds:
      print(_example_to_string(ex))
      total_examples += 1
      if total_examples == FLAGS.max_examples:
        return
Example #14
Source File: selfplay.py From training with Apache License 2.0 | 5 votes |
def main(unused_argv):
  """Dispatches a selfplay run according to --mode."""
  flags.mark_flags_as_required(['bucket_name', 'mode'])
  mode = FLAGS.mode
  if mode == 'cc':
    run_cc()
  elif mode == 'tpu':
    # TPU selfplay with resignation enabled.
    run_tpu(no_resign=False)
  elif mode == 'tpu_nr':
    # TPU selfplay with resignation disabled ("no resign").
    run_tpu(no_resign=True)
Example #15
Source File: data_async_generation.py From multilabel-image-classification-tensorflow with MIT License | 4 votes |
def define_flags():
  """Construct flags for the server.

  Registers all command-line flags used by the async data-generation
  subprocess and marks --data_dir and --cache_id as required.
  """
  flags.DEFINE_integer(name="num_workers",
                       default=multiprocessing.cpu_count(),
                       help="Size of the negative generation worker pool.")
  flags.DEFINE_string(name="data_dir", default=None,
                      help="The data root. (used to construct cache paths.)")
  flags.DEFINE_string(name="cache_id", default=None,
                      help="The cache_id generated in the main process.")
  # Bug fix: the original concatenated "This sets" + "how ...", producing
  # "setshow" in the rendered help text; a separating space is added.
  flags.DEFINE_integer(name="num_readers", default=4,
                       help="Number of reader datasets in training. This sets "
                            "how the epoch files are sharded.")
  flags.DEFINE_integer(name="num_neg", default=None,
                       help="The Number of negative instances to pair with a "
                            "positive instance.")
  flags.DEFINE_integer(name="num_train_positives", default=None,
                       help="The number of positive training examples.")
  flags.DEFINE_integer(name="num_items", default=None,
                       help="Number of items from which to select negatives.")
  flags.DEFINE_integer(name="num_users", default=None,
                       help="The number of unique users. Used for evaluation.")
  # Bug fix: the original concatenated "to produce" + "at a time.",
  # producing "produceat" in the rendered help text.
  flags.DEFINE_integer(name="epochs_per_cycle", default=1,
                       help="The number of epochs of training data to produce "
                            "at a time.")
  flags.DEFINE_integer(name="num_cycles", default=None,
                       help="The number of cycles to produce training data "
                            "for.")
  flags.DEFINE_integer(name="train_batch_size", default=None,
                       help="The batch size with which training TFRecords will "
                            "be chunked.")
  flags.DEFINE_integer(name="eval_batch_size", default=None,
                       help="The batch size with which evaluation TFRecords "
                            "will be chunked.")
  flags.DEFINE_boolean(name="redirect_logs", default=False,
                       help="Catch logs and write them to a file. "
                            "(Useful if this is run as a subprocess)")
  flags.DEFINE_boolean(name="use_tf_logging", default=False,
                       help="Use tf.logging instead of log file.")
  flags.DEFINE_integer(name="seed", default=None,
                       help="NumPy random seed to set at startup. If not "
                            "specified, a seed will not be set.")
  flags.DEFINE_boolean(name="ml_perf", default=None,
                       help="Match MLPerf. See ncf_main.py for details.")
  flags.DEFINE_bool(name="output_ml_perf_compliance_logging", default=None,
                    help="Output the MLPerf compliance logging. See "
                         "ncf_main.py for details.")
  flags.mark_flags_as_required(["data_dir", "cache_id"])