Python utils.visualize() Examples
The following are 8 code examples of utils.visualize(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
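Most of the TensorFlow examples below (Examples #2-#7) share the same TF1-era driver shape: options are declared with tf.app.flags, a main(_) entry point builds the model inside a tf.Session, and visualize() only runs when a --visualize flag is set. The sketch below shows only that call pattern; the DCGAN class, the model/utils import paths, and the flag names other than visualize are illustrative assumptions, not part of any one project.

import tensorflow as tf
from model import DCGAN        # assumed project-specific model class, as in the examples below
from utils import visualize    # the function these examples exercise

flags = tf.app.flags
flags.DEFINE_boolean("is_train", False, "train the model instead of sampling")
flags.DEFINE_boolean("visualize", False, "run the visualization step afterwards")
FLAGS = flags.FLAGS

def main(_):
    with tf.Session() as sess:
        dcgan = DCGAN(sess, config=FLAGS)
        if FLAGS.is_train:
            dcgan.train(FLAGS)
        if FLAGS.visualize:
            OPTION = 2                          # the examples below hard-code option 2
            visualize(sess, dcgan, FLAGS, OPTION)

if __name__ == '__main__':
    tf.app.run()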
Example #1
Source File: demo_graph.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 5 votes
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        logging.warning('model cannot be loaded')
        state_dict = None
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), anchors, len(category))
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in dnn.state_dict().values())))
    if state_dict is not None:
        dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width))
    output = dnn(image)
    state_dict = dnn.state_dict()
    graph = utils.visualize.Graph(config, state_dict)
    graph(output.grad_fn)
    diff = [key for key in state_dict if key not in graph.drawn]
    if diff:
        logging.warning('variables not shown: ' + str(diff))
    path = graph.dot.view(os.path.basename(model_dir) + '.gv', os.path.dirname(model_dir))
    logging.info(path)
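In this project, utils.visualize exposes a Graph helper that walks the PyTorch autograd graph produced by a forward pass. Stripped down to just the visualization calls from the example above (config, dnn, args, model_dir, and the image size are as already set up there; graph.dot is presumably a graphviz.Digraph, whose view() writes and renders the .gv file and returns its path):

image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width))
output = dnn(image)                                    # forward pass builds the autograd graph
graph = utils.visualize.Graph(config, dnn.state_dict())
graph(output.grad_fn)                                  # traverse the graph starting from the output node
path = graph.dot.view(os.path.basename(model_dir) + '.gv', os.path.dirname(model_dir))  # rendered graph file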
Example #2
Source File: main_mmd.py From opt-mmd with BSD 3-Clause "New" or "Revised" License | 5 votes
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    sample_dir_ = os.path.join(FLAGS.sample_dir, FLAGS.name)
    checkpoint_dir_ = os.path.join(FLAGS.checkpoint_dir, FLAGS.name)
    log_dir_ = os.path.join(FLAGS.log_dir, FLAGS.name)
    if not os.path.exists(checkpoint_dir_):
        os.makedirs(checkpoint_dir_)
    if not os.path.exists(sample_dir_):
        os.makedirs(sample_dir_)
    if not os.path.exists(log_dir_):
        os.makedirs(log_dir_)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=28, c_dim=1,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=checkpoint_dir_, sample_dir=sample_dir_, log_dir=log_dir_)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.sampling(FLAGS)

        if FLAGS.visualize:
            to_json("./web/js/layers.js",
                    [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

            # Below is codes for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
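Examples #2-#4 come from the same opt-mmd driver, and the only part that touches utils.visualize() is the tail of main(): when --visualize is set, the generator weights are dumped to JSON and visualize() is called with a hard-coded OPTION = 2. If you wanted every visualization mode rather than just mode 2, a small hypothetical variation (assuming, as in DCGAN-style utils modules, that the option argument is a small integer selecting a sampling mode) would be:

if FLAGS.visualize:
    for option in range(5):   # assumes the project's utils.visualize() understands options 0-4; adjust to the modes it defines
        visualize(sess, dcgan, FLAGS, option)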
Example #3
Source File: main_tmmd.py From opt-mmd with BSD 3-Clause "New" or "Revised" License | 5 votes
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    sample_dir_ = os.path.join(FLAGS.sample_dir, FLAGS.name)
    checkpoint_dir_ = os.path.join(FLAGS.checkpoint_dir, FLAGS.name)
    log_dir_ = os.path.join(FLAGS.log_dir, FLAGS.name)
    if not os.path.exists(checkpoint_dir_):
        os.makedirs(checkpoint_dir_)
    if not os.path.exists(sample_dir_):
        os.makedirs(sample_dir_)
    if not os.path.exists(log_dir_):
        os.makedirs(log_dir_)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=28, c_dim=1,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=checkpoint_dir_, sample_dir=sample_dir_, log_dir=log_dir_)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.sampling(FLAGS)

        if FLAGS.visualize:
            to_json("./web/js/layers.js",
                    [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

            # Below is codes for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
Example #4
Source File: main_mmd_fm.py From opt-mmd with BSD 3-Clause "New" or "Revised" License | 5 votes
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    sample_dir_ = os.path.join(FLAGS.sample_dir, FLAGS.name)
    checkpoint_dir_ = os.path.join(FLAGS.checkpoint_dir, FLAGS.name)
    log_dir_ = os.path.join(FLAGS.log_dir, FLAGS.name)
    if not os.path.exists(checkpoint_dir_):
        os.makedirs(checkpoint_dir_)
    if not os.path.exists(sample_dir_):
        os.makedirs(sample_dir_)
    if not os.path.exists(log_dir_):
        os.makedirs(log_dir_)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=28, c_dim=1,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=checkpoint_dir_, sample_dir=sample_dir_, log_dir=log_dir_)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.sampling(FLAGS)
            #dcgan.load(FLAGS.checkpoint_dir)

        if FLAGS.visualize:
            to_json("./web/js/layers.js",
                    [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

            # Below is codes for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
Example #5
Source File: main_non_linear_3DMM.py From Nonlinear_Face_3DMM with Apache License 2.0 | 5 votes
def main(_):
    #pp.pprint(FLAGS.__flags)
    pp.pprint(tf.app.flags.FLAGS.flag_values_dict())

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.samples_dir):
        os.makedirs(FLAGS.samples_dir)

    gpu_options = tf.GPUOptions(visible_device_list=FLAGS.gpu, per_process_gpu_memory_fraction=0.8, allow_growth=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)) as sess:
        dcgan = DCGAN(sess, FLAGS)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)
            dcgan.test(FLAGS, True)

        '''
        if FLAGS.visualize:
            to_json("./web/js/layers.js",
                    [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

            # Below is codes for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
        '''
Example #6
Source File: mainmhl.py From WaterGAN with MIT License | 4 votes
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        wgan = WGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            input_water_width=FLAGS.input_water_width,
            input_water_height=FLAGS.input_water_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            c_dim=FLAGS.c_dim,
            max_depth=FLAGS.max_depth,
            save_epoch=FLAGS.save_epoch,
            water_dataset_name=FLAGS.water_dataset,
            air_dataset_name=FLAGS.air_dataset,
            depth_dataset_name=FLAGS.depth_dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            is_crop=FLAGS.is_crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            results_dir=FLAGS.results_dir,
            sample_dir=FLAGS.sample_dir,
            num_samples=FLAGS.num_samples)

        if FLAGS.is_train:
            wgan.train(FLAGS)
        else:
            if not wgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")
            wgan.test(FLAGS)

        # to_json("./web/js/layers.js", [wgan.h0_w, wgan.h0_b, wgan.g_bn0],
        #         [wgan.h1_w, wgan.h1_b, wgan.g_bn1],
        #         [wgan.h2_w, wgan.h2_b, wgan.g_bn2],
        #         [wgan.h3_w, wgan.h3_b, wgan.g_bn3],
        #         [wgan.h4_w, wgan.h4_b, None])

        # Below is codes for visualization
        #OPTION = 1
        #visualize(sess, wgan, FLAGS, OPTION)
Example #7
Source File: mainjamaica.py From WaterGAN with MIT License | 4 votes
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        wgan = WGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            input_water_width=FLAGS.input_water_width,
            input_water_height=FLAGS.input_water_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            c_dim=FLAGS.c_dim,
            max_depth=FLAGS.max_depth,
            save_epoch=FLAGS.save_epoch,
            water_dataset_name=FLAGS.water_dataset,
            air_dataset_name=FLAGS.air_dataset,
            depth_dataset_name=FLAGS.depth_dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            is_crop=FLAGS.is_crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            results_dir=FLAGS.results_dir,
            sample_dir=FLAGS.sample_dir,
            num_samples=FLAGS.num_samples)

        if FLAGS.is_train:
            wgan.train(FLAGS)
        else:
            if not wgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")
            wgan.test(FLAGS)

        # to_json("./web/js/layers.js", [wgan.h0_w, wgan.h0_b, wgan.g_bn0],
        #         [wgan.h1_w, wgan.h1_b, wgan.g_bn1],
        #         [wgan.h2_w, wgan.h2_b, wgan.g_bn2],
        #         [wgan.h3_w, wgan.h3_b, wgan.g_bn3],
        #         [wgan.h4_w, wgan.h4_b, None])

        # Below is codes for visualization
        #OPTION = 1
        #visualize(sess, wgan, FLAGS, OPTION)
Example #8
Source File: pacgan_task.py From PacGAN with MIT License | 4 votes
def main(self):
    FLAGS = Struct(**self._config)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    FLAGS.checkpoint_dir = os.path.join(self._work_dir, "checkpoint")
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    FLAGS.sample_dir = os.path.join(self._work_dir, "samples")
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    FLAGS.work_dir = self._work_dir

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    if FLAGS.random:
        seed = random.randint(1, 100000)
        np.random.seed(seed)
        with open(os.path.join(self._work_dir, "seed.txt"), "w") as f:
            f.write("{}".format(seed))

    t_num_test_samples = int(ceil(float(FLAGS.num_test_sample) / float(FLAGS.batch_size))) * FLAGS.batch_size
    test_samples = np.random.uniform(-1, 1, size=(t_num_test_samples, FLAGS.z_dim))

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            packing_num=FLAGS.packing_num,
            num_training_sample=FLAGS.num_training_sample,
            num_test_sample=FLAGS.num_test_sample,
            z_dim=FLAGS.z_dim,
            test_samples=test_samples)

        show_all_variables()
        dcgan.train(FLAGS)

        #OPTION = 0
        #visualize(sess, dcgan, FLAGS, OPTION)
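Unlike the other TensorFlow examples, Example #8 builds its FLAGS object from a plain configuration dict via Struct(**self._config) rather than tf.app.flags, and keeps the visualize() call commented out. Struct itself is not shown in the snippet; a minimal helper of that kind (the actual PacGAN implementation may differ) is just:

class Struct(object):
    # Expose keyword arguments as attributes, so config["batch_size"] becomes FLAGS.batch_size.
    def __init__(self, **entries):
        self.__dict__.update(entries)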