Python tensorflow.python.platform.app.run() Examples
The following are 30 code examples of tensorflow.python.platform.app.run(), taken from open-source projects. The source file and license are noted above each example.
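In TensorFlow 1.x, app.run() is the standard entry-point helper: it parses command-line flags into the global FLAGS object and then invokes the supplied main function. A minimal, hedged sketch of the canonical pattern (the --greeting flag is hypothetical, not taken from any example below):

import sys
from tensorflow.python.platform import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string("greeting", "hello", "Message to print.")  # hypothetical flag

def main(argv):
  # app.run() has already parsed --greeting into FLAGS by the time main runs;
  # argv holds the program name plus any arguments the flag parser left alone.
  print(FLAGS.greeting, argv)

if __name__ == "__main__":
  app.run(main=main, argv=sys.argv)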
Example #1
Source File: benchmark.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def benchmarks_main(true_main, argv=None):
  """Run benchmarks as declared in argv.

  Args:
    true_main: True main function to run if benchmarks are not requested.
    argv: the command line arguments (if None, uses sys.argv).
  """
  if argv is None:
    argv = sys.argv
  found_arg = [arg for arg in argv
               if arg.startswith("--benchmarks=") or
               arg.startswith("-benchmarks=")]
  if found_arg:
    # Remove --benchmarks arg from sys.argv
    argv.remove(found_arg[0])
    regex = found_arg[0].split("=")[1]
    app.run(lambda _: _run_benchmarks(regex), argv=argv)
  else:
    true_main()
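In this pattern, a --benchmarks=<regex> flag on the command line reroutes execution from the normal entry point into the benchmark runner via app.run(). A minimal, hedged sketch of wiring a script to this function (my_true_main is a hypothetical stand-in):

from tensorflow.python.platform import benchmark

def my_true_main():
  # Hypothetical ordinary entry point, used when no --benchmarks= flag is given.
  print("running the ordinary program")

if __name__ == "__main__":
  # `python this_script.py --benchmarks=.*MyBenchmark.*` would instead run
  # every registered benchmark method whose full name matches the regex.
  benchmark.benchmarks_main(true_main=my_true_main)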
Example #2
Source File: benchmark.py From deep_image_model with Apache License 2.0
def benchmarks_main(true_main):
  """Run benchmarks as declared in args.

  Args:
    true_main: True main function to run if benchmarks are not requested.
  """
  argv = sys.argv
  found_arg = [arg for arg in argv
               if arg.startswith("--benchmarks=") or
               arg.startswith("-benchmarks=")]
  if found_arg:
    # Remove --benchmarks arg from sys.argv
    argv.remove(found_arg[0])
    regex = found_arg[0].split("=")[1]
    app.run(lambda _: _run_benchmarks(regex))
  else:
    true_main()
Example #3
Source File: benchmark.py From deep_image_model with Apache License 2.0
def report_benchmark(
    self,
    iters=None,
    cpu_time=None,
    wall_time=None,
    throughput=None,
    extras=None,
    name=None):
  """Report a benchmark.

  Args:
    iters: (optional) How many iterations were run
    cpu_time: (optional) Total cpu time in seconds
    wall_time: (optional) Total wall time in seconds
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.
      Values may be either floats or values that are convertible to strings.
    name: (optional) Override the BenchmarkEntry name with `name`.
      Otherwise it is inferred from the top-level method name.
  """
  name = self._get_name(overwrite_name=name)
  _global_report_benchmark(
      name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
      throughput=throughput, extras=extras)
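report_benchmark() is normally called from inside a method whose name starts with `benchmark` on a tf.test.Benchmark subclass; that naming convention is also what _run_benchmarks() (shown in later examples) matches against. A minimal, hedged sketch (the class, method, and workload are hypothetical):

import time
import tensorflow as tf

class ToyBenchmark(tf.test.Benchmark):  # hypothetical benchmark class

  def benchmarkPythonSum(self):
    # Time a trivial workload and hand the result to the benchmark machinery.
    start = time.time()
    total = sum(i * i for i in range(1000000))
    wall_time = time.time() - start
    self.report_benchmark(iters=1, wall_time=wall_time,
                          extras={"checksum": float(total)})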
Example #4
Source File: apply_noise_no_resize.py From robust_physical_perturbations with MIT License
def main(argv=None):
    with tf.device(FLAGS.device):
        with tf.Session() as sess:
            print "Noise loaded from", FLAGS.model_path
            print "Mask", FLAGS.attack_mask
            print "Source image", FLAGS.src_image

            bimg = cv2.resize(read_img(FLAGS.src_image),
                              (FLAGS.img_rows, FLAGS.img_cols)) / 255.0 - 0.5

            noise = tf.Variable(tf.random_uniform(
                [FLAGS.img_rows, FLAGS.img_cols, FLAGS.nb_channels], -0.5, 0.5),
                name='noiseattack/noise',
                collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'adv_var'])

            saver = tf.train.Saver(var_list=[noise])
            saver.restore(sess, FLAGS.model_path)

            noise_val = sess.run(noise)
            write_img('noise.png', (noise_val) * 255.0)

            mask = read_img(FLAGS.attack_mask) / 255.0
            noise_val = noise_val * mask

            write_img(FLAGS.output_path, (bimg + noise_val + 0.5) * 255)
            print "Wrote image to", FLAGS.output_path
Example #5
Source File: train_yadav.py From robust_physical_perturbations with MIT License
def main(argv=None):
    X_train, Y_train, X_test, Y_test = gtsrb(FLAGS.train_dataset,
                                             FLAGS.test_dataset,
                                             labels_filename=FLAGS.labels)
    print 'Loaded GTSRB data'

    X_train = np.asarray(map(lambda x: pre_process_image(x),
                             X_train.astype(np.uint8)), dtype=np.float32)
    X_test = np.asarray(map(lambda x: pre_process_image(x),
                            X_test.astype(np.uint8)), dtype=np.float32)

    global total_iterations
    global best_validation_accuracy
    global last_improvement
    global best_test_accuracy
    global val_acc_list
    global batch_acc_list
    global test_acc_list

    with tf.Session() as sess:
        model = YadavModel()
        sess.run(tf.initialize_all_variables())
        #X_train, Y_train = gen_transformed_data(X_train,Y_train,43,10,30,5,5,1)
        print(X_train.shape)
        print(Y_train.shape)
        optimize(sess, model, X_train, Y_train, X_test, Y_test, 10000, 128)
Example #6
Source File: custom_tflite_convert.py From keras-YOLOv3-model-set with MIT License
def _get_tf2_parser():
  """Returns ArgumentParser for tflite_convert for TensorFlow 2.0."""
  parser = argparse.ArgumentParser(
      description=("Command line tool to run TensorFlow Lite Converter."))

  # Output file flag.
  parser.add_argument(
      "--output_file",
      type=str,
      help="Full filepath of the output file.",
      required=True)

  # Input file flags.
  input_file_group = parser.add_mutually_exclusive_group(required=True)
  input_file_group.add_argument(
      "--saved_model_dir",
      type=str,
      help="Full path of the directory containing the SavedModel.")
  input_file_group.add_argument(
      "--keras_model_file",
      type=str,
      help="Full filepath of HDF5 file containing tf.Keras model.")
  return parser
Example #7
Source File: classify_yadav.py From robust_physical_perturbations with MIT License
def main(argv=None):
    imgnames = filter(lambda x: x.lower().endswith(".jpg") or
                      x.lower().endswith(".png"),
                      os.listdir(FLAGS.srcimgs))
    imgs = np.asarray(
        map(lambda x: preprocess_yadav(x),
            map(lambda x: cv2.resize(
                read_img(os.path.join(FLAGS.srcimgs, x)),
                (FLAGS.img_cols, FLAGS.img_rows)),
                imgnames)),
        dtype=np.float32)
    print 'Loaded images from %s' % FLAGS.srcimgs
    sys.stdout.flush()

    results = []
    with tf.Session() as sess:
        model = YadavModel(train=False)
        saver = tf.train.Saver()
        saver.restore(sess, FLAGS.weights)
        print 'Loaded model from %s' % FLAGS.weights
        sys.stdout.flush()
        output = sess.run(model.labels_pred,
                          feed_dict={model.features: imgs,
                                     model.keep_prob: 1.0})
        for i in range(len(imgs)):
            results.append((imgnames[i], top3_as_string(output, i)))

    for i in range(len(results)):
        print results[i][0], results[i][1]
Example #8
Source File: benchmark.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def report_benchmark(
    self,
    iters=None,
    cpu_time=None,
    wall_time=None,
    throughput=None,
    extras=None,
    name=None):
  """Report a benchmark.

  Args:
    iters: (optional) How many iterations were run
    cpu_time: (optional) median or mean cpu time in seconds.
    wall_time: (optional) median or mean wall time in seconds.
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.
      Values may be either floats or values that are convertible to strings.
    name: (optional) Override the BenchmarkEntry name with `name`.
      Otherwise it is inferred from the top-level method name.
  """
  name = self._get_name(overwrite_name=name)
  _global_report_benchmark(
      name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
      throughput=throughput, extras=extras)
Example #9
Source File: benchmark.py From auto-alt-text-lambda-api with MIT License
def benchmarks_main(true_main, argv=None):
  """Run benchmarks as declared in argv.

  Args:
    true_main: True main function to run if benchmarks are not requested.
    argv: the command line arguments (if None, uses sys.argv).
  """
  if argv is None:
    argv = sys.argv
  found_arg = [arg for arg in argv
               if arg.startswith("--benchmarks=") or
               arg.startswith("-benchmarks=")]
  if found_arg:
    # Remove --benchmarks arg from sys.argv
    argv.remove(found_arg[0])
    regex = found_arg[0].split("=")[1]
    app.run(lambda _: _run_benchmarks(regex), argv=argv)
  else:
    true_main()
Example #10
Source File: benchmark.py From auto-alt-text-lambda-api with MIT License
def report_benchmark(
    self,
    iters=None,
    cpu_time=None,
    wall_time=None,
    throughput=None,
    extras=None,
    name=None):
  """Report a benchmark.

  Args:
    iters: (optional) How many iterations were run
    cpu_time: (optional) Total cpu time in seconds
    wall_time: (optional) Total wall time in seconds
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.
      Values may be either floats or values that are convertible to strings.
    name: (optional) Override the BenchmarkEntry name with `name`.
      Otherwise it is inferred from the top-level method name.
  """
  name = self._get_name(overwrite_name=name)
  _global_report_benchmark(
      name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
      throughput=throughput, extras=extras)
Example #11
Source File: benchmark.py From keras-lambda with MIT License
def report_benchmark(
    self,
    iters=None,
    cpu_time=None,
    wall_time=None,
    throughput=None,
    extras=None,
    name=None):
  """Report a benchmark.

  Args:
    iters: (optional) How many iterations were run
    cpu_time: (optional) Total cpu time in seconds
    wall_time: (optional) Total wall time in seconds
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.
      Values may be either floats or values that are convertible to strings.
    name: (optional) Override the BenchmarkEntry name with `name`.
      Otherwise it is inferred from the top-level method name.
  """
  name = self._get_name(overwrite_name=name)
  _global_report_benchmark(
      name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
      throughput=throughput, extras=extras)
Example #12
Source File: benchmark.py From lambda-packs with MIT License
def benchmarks_main(true_main, argv=None):
  """Run benchmarks as declared in argv.

  Args:
    true_main: True main function to run if benchmarks are not requested.
    argv: the command line arguments (if None, uses sys.argv).
  """
  if argv is None:
    argv = sys.argv
  found_arg = [arg for arg in argv
               if arg.startswith("--benchmarks=") or
               arg.startswith("-benchmarks=")]
  if found_arg:
    # Remove --benchmarks arg from sys.argv
    argv.remove(found_arg[0])
    regex = found_arg[0].split("=")[1]
    app.run(lambda _: _run_benchmarks(regex), argv=argv)
  else:
    true_main()
Example #13
Source File: benchmark.py From keras-lambda with MIT License
def benchmarks_main(true_main, argv=None):
  """Run benchmarks as declared in argv.

  Args:
    true_main: True main function to run if benchmarks are not requested.
    argv: the command line arguments (if None, uses sys.argv).
  """
  if argv is None:
    argv = sys.argv
  found_arg = [arg for arg in argv
               if arg.startswith("--benchmarks=") or
               arg.startswith("-benchmarks=")]
  if found_arg:
    # Remove --benchmarks arg from sys.argv
    argv.remove(found_arg[0])
    regex = found_arg[0].split("=")[1]
    app.run(lambda _: _run_benchmarks(regex), argv=argv)
  else:
    true_main()
Example #14
Source File: benchmark.py From lambda-packs with MIT License
def report_benchmark(
    self,
    iters=None,
    cpu_time=None,
    wall_time=None,
    throughput=None,
    extras=None,
    name=None):
  """Report a benchmark.

  Args:
    iters: (optional) How many iterations were run
    cpu_time: (optional) Total cpu time in seconds
    wall_time: (optional) Total wall time in seconds
    throughput: (optional) Throughput (in MB/s)
    extras: (optional) Dict mapping string keys to additional benchmark info.
      Values may be either floats or values that are convertible to strings.
    name: (optional) Override the BenchmarkEntry name with `name`.
      Otherwise it is inferred from the top-level method name.
  """
  name = self._get_name(overwrite_name=name)
  _global_report_benchmark(
      name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
      throughput=throughput, extras=extras)
Example #15
Source File: make_cifar10_joblib.py From cleverhans with MIT License
def main(argv):
    model_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    if model_file is None:
        print('No model found')
        sys.exit()

    set_log_level(logging.DEBUG)

    sess = tf.Session()
    with sess.as_default():
        model = make_wresnet()
        saver = tf.train.Saver()
        # Restore the checkpoint
        saver.restore(sess, model_file)
        SCOPE = "cifar10_challenge"
        model2 = make_wresnet(scope=SCOPE)
        assert len(model.get_vars()) == len(model2.get_vars())
        found = [False] * len(model2.get_vars())
        for var1 in model.get_vars():
            var1_found = False
            var2_name = SCOPE + "/" + var1.name
            for idx, var2 in enumerate(model2.get_vars()):
                if var2.name == var2_name:
                    var1_found = True
                    found[idx] = True
                    sess.run(tf.assign(var2, var1))
                    break
            assert var1_found, var1.name
        assert all(found)

        model2.dataset_factory = Factory(CIFAR10, {"max_val": 255})
        serial.save("model.joblib", model2)
Example #16
Source File: context.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def enable_eager_execution():
  """Enables, for the rest of the lifetime of this program, eager execution.

  If not called immediately on startup risks creating breakage and bugs.
  Calling this method more than once in the same process will lead to an
  exception.

  Example:
  ```python
  # Before eager execution is enabled, `Tensor`s are symbolic and do not hold
  # concrete values (they are to be executed in a `tf.Session`).
  assert not hasattr(tf.multiply(6, 7), "numpy")

  tfe.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and `Tensor`s hold concrete values, which can be accessed as
  # `numpy.ndarray`s through the `numpy()` method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Raises:
    ValueError: If this method has already been invoked in the current process.
  """
  global _default_mode
  if _default_mode == EAGER_MODE:
    func_name = (
        "tfe." + tf_inspect.getframeinfo(tf_inspect.currentframe()).function)
    raise ValueError(
        "Do not call %s more than once in the same process. Note eager-mode "
        "methods such as tfe.run() also call %s." % (func_name, func_name))
  _default_mode = EAGER_MODE
Example #17
Source File: googletest.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def main(argv=None):  # pylint: disable=function-redefined
  def main_wrapper():
    args = argv
    if args is None:
      args = sys.argv
    return app.run(main=g_main, argv=args)
  benchmark.benchmarks_main(true_main=main_wrapper)
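This wrapper lets one binary serve double duty: an ordinary invocation runs the unit tests through app.run(), while a --benchmarks= flag diverts execution to the benchmark runner. A hedged sketch of a test module driving it (the test case is hypothetical):

import unittest
from tensorflow.python.platform import googletest

class ArithmeticTest(unittest.TestCase):  # hypothetical test case

  def test_addition(self):
    self.assertEqual(2 + 2, 4)

if __name__ == "__main__":
  googletest.main()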
Example #18
Source File: googletest.py From keras-lambda with MIT License
def main(argv=None):  # pylint: disable=function-redefined
  def main_wrapper():
    args = argv
    if args is None:
      args = sys.argv
    return app.run(main=g_main, argv=args)
  benchmark.benchmarks_main(true_main=main_wrapper)
Example #19
Source File: benchmark.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _run_benchmarks(regex):
  """Run benchmarks that match regex `regex`.

  This function goes through the global benchmark registry, and matches
  benchmark class and method names of the form
  `module.name.BenchmarkClass.benchmarkMethod` to the given regex.
  If a method matches, it is run.

  Args:
    regex: The string regular expression to match Benchmark classes against.
  """
  registry = list(GLOBAL_BENCHMARK_REGISTRY)

  # Match benchmarks in registry against regex
  for benchmark in registry:
    benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
    attrs = dir(benchmark)
    # Don't instantiate the benchmark class unless necessary
    benchmark_instance = None

    for attr in attrs:
      if not attr.startswith("benchmark"):
        continue
      candidate_benchmark_fn = getattr(benchmark, attr)
      if not callable(candidate_benchmark_fn):
        continue
      full_benchmark_name = "%s.%s" % (benchmark_name, attr)
      if regex == "all" or re.search(regex, full_benchmark_name):
        # Instantiate the class if it hasn't been instantiated
        benchmark_instance = benchmark_instance or benchmark()
        # Get the method tied to the class
        instance_benchmark_fn = getattr(benchmark_instance, attr)
        # Call the instance method
        instance_benchmark_fn()
Example #20
Source File: prediction_train.py From HumanRecognition with MIT License
def main(unused_argv):
    print('Constructing models and inputs.')
    with tf.variable_scope('model', reuse=None) as training_scope:
        images, actions, states = build_tfrecord_input(training=True)
        model = Model(images, actions, states, FLAGS.sequence_length,
                      prefix='train')

    with tf.variable_scope('val_model', reuse=None):
        val_images, val_actions, val_states = build_tfrecord_input(
            training=False)
        val_model = Model(val_images, val_actions, val_states,
                          FLAGS.sequence_length, training_scope, prefix='val')

    print('Constructing saver.')
    # Make saver.
    saver = tf.train.Saver(
        tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=0)

    # Make training session.
    sess = tf.InteractiveSession()
    summary_writer = tf.summary.FileWriter(
        FLAGS.event_log_dir, graph=sess.graph, flush_secs=10)

    if FLAGS.pretrained_model:
        saver.restore(sess, FLAGS.pretrained_model)

    tf.train.start_queue_runners(sess)
    sess.run(tf.global_variables_initializer())

    tf.logging.info('iteration number, cost')

    # Run training.
    for itr in range(FLAGS.num_iterations):
        # Generate new batch of data.
        feed_dict = {model.iter_num: np.float32(itr),
                     model.lr: FLAGS.learning_rate}
        cost, _, summary_str = sess.run(
            [model.loss, model.train_op, model.summ_op], feed_dict)

        # Print info: iteration #, cost.
        tf.logging.info(str(itr) + ' ' + str(cost))

        if (itr) % VAL_INTERVAL == 2:
            # Run through validation set.
            feed_dict = {val_model.lr: 0.0,
                         val_model.iter_num: np.float32(itr)}
            _, val_summary_str = sess.run(
                [val_model.train_op, val_model.summ_op], feed_dict)
            summary_writer.add_summary(val_summary_str, itr)

        if (itr) % SAVE_INTERVAL == 2:
            tf.logging.info('Saving model.')
            saver.save(sess, FLAGS.output_dir + '/model' + str(itr))

        if (itr) % SUMMARY_INTERVAL:
            summary_writer.add_summary(summary_str, itr)

    tf.logging.info('Saving model.')
    saver.save(sess, FLAGS.output_dir + '/model')
    tf.logging.info('Training complete')
    tf.logging.flush()
Example #21
Source File: custom_tflite_convert.py From keras-YOLOv3-model-set with MIT License
def main():
  app.run(main=run_main, argv=sys.argv[:1])
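The argv=sys.argv[:1] slice is deliberate: the converter's own argparse-based parser (see Example #6) has already consumed the real command-line arguments, so app.run() receives only the program name and cannot trip over flags it does not define. A hedged sketch of the same trick in isolation (run_main here is a hypothetical stand-in, not the converter's actual function):

import sys
from tensorflow.python.platform import app

def run_main(_):
  # By this point any real CLI arguments were handled elsewhere (e.g. by
  # argparse); app.run() only ever saw the program name.
  print("custom flags were parsed before app.run() ran")

if __name__ == "__main__":
  app.run(main=run_main, argv=sys.argv[:1])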
Example #22
Source File: benchmark.py From keras-lambda with MIT License
def _run_benchmarks(regex):
  """Run benchmarks that match regex `regex`.

  This function goes through the global benchmark registry, and matches
  benchmark class and method names of the form
  `module.name.BenchmarkClass.benchmarkMethod` to the given regex.
  If a method matches, it is run.

  Args:
    regex: The string regular expression to match Benchmark classes against.
  """
  registry = list(GLOBAL_BENCHMARK_REGISTRY)

  # Match benchmarks in registry against regex
  for benchmark in registry:
    benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
    attrs = dir(benchmark)
    # Don't instantiate the benchmark class unless necessary
    benchmark_instance = None

    for attr in attrs:
      if not attr.startswith("benchmark"):
        continue
      candidate_benchmark_fn = getattr(benchmark, attr)
      if not callable(candidate_benchmark_fn):
        continue
      full_benchmark_name = "%s.%s" % (benchmark_name, attr)
      if regex == "all" or re.search(regex, full_benchmark_name):
        # Instantiate the class if it hasn't been instantiated
        benchmark_instance = benchmark_instance or benchmark()
        # Get the method tied to the class
        instance_benchmark_fn = getattr(benchmark_instance, attr)
        # Call the instance method
        instance_benchmark_fn()
Example #23
Source File: run_executor.py From tfx with Apache License 2.0
def _run_transform(args, beam_pipeline_args):
  """Construct and run transform executor."""
  absl.logging.set_verbosity(absl.logging.INFO)
  inputs = {
      labels.ANALYZE_DATA_PATHS_LABEL: args.analyze_examples,
      labels.ANALYZE_PATHS_FILE_FORMATS_LABEL:
          [labels.FORMAT_TFRECORD] * len(args.analyze_examples),
      labels.TRANSFORM_DATA_PATHS_LABEL: [
          args.analyze_examples + args.transform_only_examples
      ],
      labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL:
          [labels.FORMAT_TFRECORD] *
          (len(args.analyze_examples) + len(args.transform_only_examples)),
      labels.SCHEMA_PATH_LABEL: args.input_schema_path,
      labels.PREPROCESSING_FN: args.preprocessing_fn_path,
      labels.EXAMPLES_DATA_FORMAT_LABEL: args.example_data_format,
      labels.COMPUTE_STATISTICS_LABEL: args.compute_statistics,
      labels.BEAM_PIPELINE_ARGS: beam_pipeline_args,
  }
  outputs = {
      labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: args.transform_fn,
      labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: (
          args.transformed_examples),
      labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: (args.per_set_stats_outputs),
      labels.TEMP_OUTPUT_LABEL: args.tmp_location,
  }
  executor = Executor(Executor.Context(beam_pipeline_args=beam_pipeline_args))
  executor.Transform(inputs, outputs, args.status_file)
Example #24
Source File: flags_test.py From deep_image_model with Apache License 2.0
def main(_):
  # unittest.main() tries to interpret the unknown flags, so use the
  # direct functions instead.
  runner = unittest.TextTestRunner()
  itersuite = unittest.TestLoader().loadTestsFromTestCase(FlagsTest)
  runner.run(itersuite)
Example #25
Source File: attack_model_featadv.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def main(argv):
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    input_shape = [FLAGS.batch_size, 224, 224, 3]
    x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))

    print("Input shape:")
    print(input_shape)

    model = make_imagenet_cnn(input_shape)
    attack = FastFeatureAdversaries(model)
    attack_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.,
                     'nb_iter': FLAGS.nb_iter, 'eps_iter': 0.01,
                     'layer': FLAGS.layer}
    x_adv = attack.generate(x_src, x_guide, **attack_params)
    h_adv = model.fprop(x_adv)[FLAGS.layer]
    h_src = model.fprop(x_src)[FLAGS.layer]
    h_guide = model.fprop(x_guide)[FLAGS.layer]

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        ha, hs, hg, xa, xs, xg = sess.run(
            [h_adv, h_src, h_guide, x_adv, x_src, x_guide])

        print("L2 distance between source and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hs-ha)*(hs-ha)).sum()))
        print("L2 distance between guide and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hg-ha)*(hg-ha)).sum()))
        print("L2 distance between source and guide `%s`: %.4f" %
              (FLAGS.layer, ((hg-hs)*(hg-hs)).sum()))
        print("Maximum perturbation: %.4f" % np.abs((xa-xs)).max())
        print("Original features: ")
        print(hs[:10, :10])
        print("Adversarial features: ")
        print(ha[:10, :10])
Example #26
Source File: benchmark.py From deep_image_model with Apache License 2.0
def _run_benchmarks(regex):
  """Run benchmarks that match regex `regex`.

  This function goes through the global benchmark registry, and matches
  benchmark class and method names of the form
  `module.name.BenchmarkClass.benchmarkMethod` to the given regex.
  If a method matches, it is run.

  Args:
    regex: The string regular expression to match Benchmark classes against.
  """
  registry = list(GLOBAL_BENCHMARK_REGISTRY)

  # Match benchmarks in registry against regex
  for benchmark in registry:
    benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
    attrs = dir(benchmark)
    # Don't instantiate the benchmark class unless necessary
    benchmark_instance = None

    for attr in attrs:
      if not attr.startswith("benchmark"):
        continue
      candidate_benchmark_fn = getattr(benchmark, attr)
      if not callable(candidate_benchmark_fn):
        continue
      full_benchmark_name = "%s.%s" % (benchmark_name, attr)
      if regex == "all" or re.search(regex, full_benchmark_name):
        # Instantiate the class if it hasn't been instantiated
        benchmark_instance = benchmark_instance or benchmark()
        # Get the method tied to the class
        instance_benchmark_fn = getattr(benchmark_instance, attr)
        # Call the instance method
        instance_benchmark_fn()
Example #27
Source File: benchmark.py From lambda-packs with MIT License
def _run_benchmarks(regex):
  """Run benchmarks that match regex `regex`.

  This function goes through the global benchmark registry, and matches
  benchmark class and method names of the form
  `module.name.BenchmarkClass.benchmarkMethod` to the given regex.
  If a method matches, it is run.

  Args:
    regex: The string regular expression to match Benchmark classes against.
  """
  registry = list(GLOBAL_BENCHMARK_REGISTRY)

  # Match benchmarks in registry against regex
  for benchmark in registry:
    benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
    attrs = dir(benchmark)
    # Don't instantiate the benchmark class unless necessary
    benchmark_instance = None

    for attr in attrs:
      if not attr.startswith("benchmark"):
        continue
      candidate_benchmark_fn = getattr(benchmark, attr)
      if not callable(candidate_benchmark_fn):
        continue
      full_benchmark_name = "%s.%s" % (benchmark_name, attr)
      if regex == "all" or re.search(regex, full_benchmark_name):
        # Instantiate the class if it hasn't been instantiated
        benchmark_instance = benchmark_instance or benchmark()
        # Get the method tied to the class
        instance_benchmark_fn = getattr(benchmark_instance, attr)
        # Call the instance method
        instance_benchmark_fn()
Example #28
Source File: googletest.py From lambda-packs with MIT License
def main(argv=None):  # pylint: disable=function-redefined
  def main_wrapper():
    args = argv
    if args is None:
      args = sys.argv
    return app.run(main=g_main, argv=args)
  benchmark.benchmarks_main(true_main=main_wrapper)
Example #29
Source File: train_yadav.py From robust_physical_perturbations with MIT License
def acc_eval(session, model, X_input, Y_input, batch_size):
    num_batches = X_input.shape[0] // batch_size
    cur_acc = 0
    for acc_batch in range(num_batches):
        start = acc_batch * batch_size
        end = min(len(X_input), start + batch_size)
        cur_batch_len = end - start

        feed_dict_eval = {model.features: X_input[start:end, :, :, :],
                          model.labels_true: Y_input[start:end, :],
                          model.keep_prob: 1.0}
        cur_acc += session.run(model.accuracy,
                               feed_dict=feed_dict_eval) * cur_batch_len

    return cur_acc / X_input.shape[0]
Example #30
Source File: calc_accuracy_yadav.py From robust_physical_perturbations with MIT License
def main(argv=None):
    X_train, Y_train, X_test, Y_test = gtsrb(FLAGS.train_dataset,
                                             FLAGS.test_dataset)
    print 'Loaded GTSRB data'

    X_test = np.asarray(map(lambda x: pre_process_image(x),
                            X_test.astype(np.uint8)), dtype=np.float32)

    with tf.Session() as sess:
        model = YadavModel()
        saver = tf.train.Saver()
        saver.restore(sess=sess, save_path=FLAGS.weights)
        print 'Accuracy on test data', sess.run(
            model.accuracy,
            feed_dict={model.features: X_test,
                       model.labels_true: Y_test,
                       model.keep_prob: 1.0})