Python model.model() Examples

The following are 30 code examples of model.model(), taken from open-source projects. The source file, project, and license for each snippet are noted above the example. You may also want to check out all other available functions and classes of the module model.
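In most of the projects below, model.model() builds a TensorFlow inference graph (for example, the score and geometry maps of the EAST text detector), while a few projects use it to run a complete OCR pipeline or to construct a model object. As a quick orientation, here is a minimal sketch of the EAST-style call pattern, assuming a project-specific model.py such as the one used in Examples #2, #3 and #12; the exact signature varies from project to project.

import tensorflow as tf

import model  # project-specific module, e.g. EAST's model.py

# Placeholder for a batch of RGB images of arbitrary size.
input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3],
                              name='input_images')

# model.model() builds the network and returns the per-pixel score map and
# geometry map that the EAST detector post-processes into text boxes.
f_score, f_geometry = model.model(input_images, is_training=False)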
Example #1
Source File: enjoy-adv.py    From cleverhans with MIT License
def __init__(self, env, dueling, noisy, fname):
    self.g = tf.Graph()
    self.noisy = noisy
    self.dueling = dueling
    self.env = env
    with self.g.as_default():
      self.act = deepq.build_act_enjoy(
          make_obs_ph=lambda name: U.Uint8Input(
              env.observation_space.shape, name=name),
          q_func=dueling_model if dueling else model,
          num_actions=env.action_space.n,
          noisy=noisy
      )
      self.saver = tf.train.Saver()
    self.sess = tf.Session(graph=self.g)

    if fname is not None:
      print('Loading Model...')
      self.saver.restore(self.sess, fname) 
Example #2
Source File: multigpu_train.py    From EAST_ICPR with GNU General Public License v3.0
def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=True)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    if reuse_variables is None:
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)

    return total_loss, model_loss 
Example #3
Source File: east_inference.py    From uai-sdk with Apache License 2.0
def load_model(self):
    sess = tf.Session()

    with tf.get_default_graph().as_default():
      input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
      global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

      f_score, f_geometry = model.model(input_images, is_training=False)

      variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
      saver = tf.train.Saver(variable_averages.variables_to_restore())

      with sess.as_default():
        model_path = tf.train.latest_checkpoint(self.model_dir)
        saver.restore(sess, model_path)

    self._f_score = f_score
    self._f_geometry = f_geometry
    self._sess = sess
    self._input_images = input_images 
Example #4
Source File: train.py    From NoisyNet-DQN with MIT License
def maybe_load_model(savedir, container):
    """Load model if present at the specified path."""
    if savedir is None:
        return

    state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
    if container is not None:
        logger.log("Attempting to download model from Azure")
        found_model = container.get(savedir, 'training_state.pkl.zip')
    else:
        found_model = os.path.exists(state_path)
    if found_model:
        state = pickle_load(state_path, compression=True)
        model_dir = "model-{}".format(state["num_iters"])
        if container is not None:
            container.get(savedir, model_dir)
        U.load_state(os.path.join(savedir, model_dir, "saved"))
        logger.log("Loaded models checkpoint at {} iterations".format(state["num_iters"]))
        return state 
Example #5
Source File: train.py    From NoisyNet-DQN with MIT License
def maybe_save_model(savedir, container, state):
    """This function checkpoints the model and state of the training algorithm."""
    if savedir is None:
        return
    start_time = time.time()
    model_dir = "model-{}".format(state["num_iters"])
    U.save_state(os.path.join(savedir, model_dir, "saved"))
    if container is not None:
        container.put(os.path.join(savedir, model_dir), model_dir)
    relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)
    if container is not None:
        container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')
    relatively_safe_pickle_dump(state["monitor_state"], os.path.join(savedir, 'monitor_state.pkl'))
    if container is not None:
        container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')
    logger.log("Saved model in {} seconds\n".format(time.time() - start_time)) 
Example #6
Source File: complete_scan.py    From ScanComplete with Apache License 2.0
def predict_from_model(logit_groups_geometry, logit_groups_semantics,
                       temperature):
  """Reconstruct predicted geometry and semantics from model output."""
  predictions_geometry_list = []
  for logit_group in logit_groups_geometry:
    if FLAGS.p_norm > 0:
      predictions_geometry_list.append(logit_group[:, :, :, :, 0])
    else:
      logit_group_shape = logit_group.shape_as_list()
      logit_group = tf.reshape(logit_group, [-1, logit_group_shape[-1]])
      samples = tf.multinomial(temperature * logit_group, 1)
      predictions_geometry_list.append(
          tf.reshape(samples, logit_group_shape[:-1]))
  predictions_semantics_list = []
  if FLAGS.predict_semantics:
    for logit_group in logit_groups_semantics:
      predictions_semantics_list.append(tf.argmax(logit_group, 4))
  else:
    predictions_semantics_list = [
        tf.zeros(shape=predictions_geometry_list[0].shape, dtype=tf.uint8)
    ] * len(predictions_geometry_list)
  return predictions_geometry_list, predictions_semantics_list 
Example #7
Source File: enjoy-adv.py    From rl-attack with MIT License
def __init__(self, env, dueling, noisy, fname):
    self.g = tf.Graph()
    self.noisy = noisy
    self.dueling = dueling
    self.env = env
    with self.g.as_default():
        self.act = deepq.build_act_enjoy(
            make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
            q_func=dueling_model if dueling else model,
            num_actions=env.action_space.n,
            noisy=noisy
        )
        self.saver = tf.train.Saver()
    self.sess = tf.Session(graph=self.g)

    if fname is not None:
        print('Loading Model...')
        self.saver.restore(self.sess, fname) 
Example #8
Source File: process.py    From DeepRNN with MIT License
def create_eval_model(model_creator, hparams, data_dir):
    """Create eval graph, model and iterator."""
    eval_data_path = []
    for root, _, name in os.walk(os.path.join(data_dir, 'eval_data')):
        for x in name:
            if x.split('.')[-1] == 'mat':
                eval_data_path.append(os.path.join(root, x))
    assert len(eval_data_path) == 1
    eval_data = scio.loadmat(*eval_data_path)['data']
    data_mean, data_std = load_data_mean_std(hparams, data_dir)
    batch_size = eval_data.shape[0]
    graph = tf.Graph()

    with graph.as_default(), tf.container("eval"):
        eval_src_data = eval_data[:, :, 0:hparams.src_feature_size]
        # channels: [features, SBP, DBP, MBP]
        eval_tgt_data = eval_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(eval_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(eval_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=batch_size,
                                random_seed=hparams.random_seed, is_train=False)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.EVAL)
    return EvalModel(graph=graph, model=model, iterator=iterator, data_mean=data_mean, data_std=data_std) 
Example #9
Source File: process.py    From DeepRNN with MIT License
def create_train_model(model_creator, hparams, data_dir):
    """Create train graph, model, and iterator."""
    train_data_path = []
    for root, _, name in os.walk(os.path.join(data_dir, 'train_data')):
        for x in name:
            if x.split('.')[-1] == 'mat':
                train_data_path.append(os.path.join(root, x))
    assert len(train_data_path) == 1
    train_data = scio.loadmat(*train_data_path)['data']
    assert hparams.src_len == hparams.tgt_len == train_data.shape[1]
    graph = tf.Graph()

    with graph.as_default(), tf.container("train"):
        # channels: [features, SBP, DBP, MBP]
        train_src_data = train_data[:, :, 0:hparams.src_feature_size]
        train_tgt_data = train_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(train_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(train_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=hparams.batch_size,
                                random_seed=hparams.random_seed, is_train=True)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.TRAIN)
    return TrainModel(graph=graph, model=model, iterator=iterator) 
Example #10
Source File: enjoy-adv.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def __init__(self, env, dueling, noisy, fname):
        self.g = tf.Graph()
        self.noisy = noisy
        self.dueling = dueling
        self.env = env
        with self.g.as_default():
            self.act = deepq.build_act_enjoy(
                make_obs_ph=lambda name: U.Uint8Input(
                    env.observation_space.shape, name=name),
                q_func=dueling_model if dueling else model,
                num_actions=env.action_space.n,
                noisy=noisy
            )
            self.saver = tf.train.Saver()
        self.sess = tf.Session(graph=self.g)

        if fname is not None:
            print('Loading Model...')
            self.saver.restore(self.sess, fname) 
Example #11
Source File: enjoy-adv.py    From rl-attack with MIT License
def parse_args():
    parser = argparse.ArgumentParser("Run an already learned DQN model.")
    # Environment
    parser.add_argument("--env", type=str, required=True, help="name of the game")
    parser.add_argument("--model-dir", type=str, default=None, help="load model from this directory")
    parser.add_argument("--video", type=str, default=None, help="path to mp4 file where the video of the first episode will be recorded")
    boolean_flag(parser, "stochastic", default=True, help="whether or not to use stochastic actions according to the model's eps value")
    boolean_flag(parser, "dueling", default=False, help="whether or not to use the dueling model")
    # V: Attack Arguments #
    parser.add_argument("--model-dir2", type=str, default=None, help="load adversarial model from this directory (blackbox attacks)")
    parser.add_argument("--attack", type=str, default=None, help="method used to attack the model")
    boolean_flag(parser, "noisy", default=False, help="whether or not to use NoisyNet")
    boolean_flag(parser, "noisy2", default=False, help="whether or not to use NoisyNet for the adversarial (blackbox) model")
    boolean_flag(parser, "blackbox", default=False, help="whether or not to run a blackbox attack")

    return parser.parse_args() 
Example #12
Source File: multigpu_train.py    From uai-sdk with Apache License 2.0
def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=True)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    if reuse_variables is None:
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)

    return total_loss, model_loss 
Example #13
Source File: train.py    From cleverhans with MIT License
def maybe_save_model(savedir, container, state):
  if savedir is None:
    return
  start_time = time.time()
  model_dir = "model-{}".format(state["num_iters"])
  U.save_state(os.path.join(savedir, model_dir, "saved"))
  if container is not None:
    container.put(os.path.join(savedir, model_dir), model_dir)
  relatively_safe_pickle_dump(state,
                              os.path.join(savedir,
                                           'training_state.pkl.zip'),
                              compression=True)
  if container is not None:
    container.put(os.path.join(savedir, 'training_state.pkl.zip'),
                  'training_state.pkl.zip')
  relatively_safe_pickle_dump(state["monitor_state"],
                              os.path.join(savedir, 'monitor_state.pkl'))
  if container is not None:
    container.put(os.path.join(savedir, 'monitor_state.pkl'),
                  'monitor_state.pkl')
  logger.log("Saved model in {} seconds\n".format(time.time() - start_time)) 
Example #14
Source File: train.py    From cleverhans with MIT License
def maybe_load_model(savedir, container):
  """Load model if present at the specified path."""
  if savedir is None:
    return

  state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
  if container is not None:
    logger.log("Attempting to download model from Azure")
    found_model = container.get(savedir, 'training_state.pkl.zip')
  else:
    found_model = os.path.exists(state_path)
  if found_model:
    state = pickle_load(state_path, compression=True)
    model_dir = "model-{}".format(state["num_iters"])
    if container is not None:
      container.get(savedir, model_dir)
    U.load_state(os.path.join(savedir, model_dir, "saved"))
    logger.log("Loaded models checkpoint at {} iterations".format(
        state["num_iters"]))
    return state 
Example #15
Source File: train.py    From rl-attack with MIT License
def maybe_save_model(savedir, container, state):
    """This function checkpoints the model and state of the training algorithm."""
    if savedir is None:
        return
    start_time = time.time()
    model_dir = "model-{}".format(state["num_iters"])
    U.save_state(os.path.join(savedir, model_dir, "saved"))
    if container is not None:
        container.put(os.path.join(savedir, model_dir), model_dir)
    relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)
    if container is not None:
        container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')
    relatively_safe_pickle_dump(state["monitor_state"], os.path.join(savedir, 'monitor_state.pkl'))
    if container is not None:
        container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')
    logger.log("Saved model in {} seconds\n".format(time.time() - start_time)) 
Example #16
Source File: train.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def maybe_load_model(savedir, container):
    """Load model if present at the specified path."""
    if savedir is None:
        return

    state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
    if container is not None:
        logger.log("Attempting to download model from Azure")
        found_model = container.get(savedir, 'training_state.pkl.zip')
    else:
        found_model = os.path.exists(state_path)
    if found_model:
        state = pickle_load(state_path, compression=True)
        model_dir = "model-{}".format(state["num_iters"])
        if container is not None:
            container.get(savedir, model_dir)
        U.load_state(os.path.join(savedir, model_dir, "saved"))
        logger.log("Loaded models checkpoint at {} iterations".format(
            state["num_iters"]))
        return state 
Example #17
Source File: train.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def maybe_save_model(savedir, container, state):
    if savedir is None:
        return
    start_time = time.time()
    model_dir = "model-{}".format(state["num_iters"])
    U.save_state(os.path.join(savedir, model_dir, "saved"))
    if container is not None:
        container.put(os.path.join(savedir, model_dir), model_dir)
    relatively_safe_pickle_dump(state,
                                os.path.join(savedir,
                                             'training_state.pkl.zip'),
                                compression=True)
    if container is not None:
        container.put(os.path.join(savedir, 'training_state.pkl.zip'),
                      'training_state.pkl.zip')
    relatively_safe_pickle_dump(state["monitor_state"],
                                os.path.join(savedir, 'monitor_state.pkl'))
    if container is not None:
        container.put(os.path.join(savedir, 'monitor_state.pkl'),
                      'monitor_state.pkl')
    logger.log("Saved model in {} seconds\n".format(time.time() - start_time)) 
Example #18
Source File: train.py    From emdqn with MIT License
def maybe_save_model(savedir, container, state):
    """This function checkpoints the model and state of the training algorithm."""
    if savedir is None:
        return
    start_time = time.time()
    model_dir = "model-{}".format(state["num_iters"])
    U.save_state(os.path.join(savedir, model_dir, "saved"))
    if container is not None:
        container.put(os.path.join(savedir, model_dir), model_dir)
    relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)
    if container is not None:
        container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')
    relatively_safe_pickle_dump(state["monitor_state"], os.path.join(savedir, 'monitor_state.pkl'))
    if container is not None:
        container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')
    logger.log("Saved model in {} seconds\n".format(time.time() - start_time)) 
Example #19
Source File: train.py    From emdqn with MIT License
def maybe_load_model(savedir, container):
    """Load model if present at the specified path."""
    if savedir is None:
        return

    state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
    if container is not None:
        logger.log("Attempting to download model from Azure")
        found_model = container.get(savedir, 'training_state.pkl.zip')
    else:
        found_model = os.path.exists(state_path)
    if found_model:
        state = pickle_load(state_path, compression=True)
        model_dir = "model-{}".format(state["num_iters"])
        if container is not None:
            container.get(savedir, model_dir)
        U.load_state(os.path.join(savedir, model_dir, "saved"))
        logger.log("Loaded models checkpoint at {} iterations".format(state["num_iters"]))
        return state 
Example #20
Source File: train.py    From vae-audio with MIT License
def main(config):
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.initialize('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build the optimizer and learning rate scheduler; delete every line containing lr_scheduler to disable the scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.initialize('optimizer', torch.optim, trainable_params)

    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler, optimizer)

    trainer = getattr(module_trainer, config['trainer']['type'])(model, loss, metrics, optimizer,
                                                                    config=config,
                                                                    data_loader=data_loader,
                                                                    valid_data_loader=valid_data_loader,
                                                                    lr_scheduler=lr_scheduler)
    trainer.train() 
Example #21
Source File: distgpu_train.py    From uai-sdk with Apache License 2.0
def _tower_fn(is_training, images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=is_training)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    summaries = None
    if reuse_variables is None:
        image_sum = tf.summary.image('input', images)
        score_sum = tf.summary.image('score_map', score_maps)
        f_score_sum = tf.summary.image('score_map_pred', f_score * 255)
        geo_sum = tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        f_geo_sum = tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        mask_sum = tf.summary.image('training_masks', training_masks)
        loss1_sum = tf.summary.scalar('model_loss', model_loss)
        loss_sum = tf.summary.scalar('total_loss', total_loss)
        summaries = [image_sum, score_sum, f_score_sum, geo_sum, f_geo_sum, mask_sum, loss1_sum, loss_sum]

    model_params = tf.trainable_variables()
    tower_grad = tf.gradients(total_loss, model_params)

    return total_loss, zip(tower_grad, model_params), summaries 
Example #22
Source File: distgpu_train.py    From uai-sdk with Apache License 2.0
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
    """Create input graph for model.

    Args:
      data_dir: Directory where TFRecords representing the dataset are located.
      subset: one of 'train', 'validate' and 'eval'.
      num_shards: number of towers participating in data-parallel training.
      batch_size: total training batch size, to be divided across the shards.
      use_distortion_for_training: True to use distortions.
    Returns:
      feature_shards and a list [score_map_shards, geo_map_shards,
      training_mask_shards], each split into num_shards pieces.
    """
    with tf.device('/cpu:0'):
        use_distortion = subset == 'train' and use_distortion_for_training
        dataset = icdar_dataset.EastDataSet(data_dir, batch_size, subset, use_distortion)
        image_batch, score_map_batch, geo_map_batch, training_mask_batch = dataset.make_batch(batch_size)

        if num_shards <= 1:
            # No GPU available or only 1 GPU.
            num_shards = 1

        feature_shards = tf.split(image_batch, num_shards)
        score_map_shards = tf.split(score_map_batch, num_shards)
        geo_map_shards = tf.split(geo_map_batch, num_shards)
        training_mask_shards = tf.split(training_mask_batch, num_shards)

        return feature_shards, [score_map_shards, geo_map_shards, training_mask_shards] 
Example #23
Source File: enjoy-adv.py    From rl-attack with MIT License
def craft_adv(self):
    with self.sess.as_default():
        with self.g.as_default():
            craft_adv_obs = deepq.build_adv(
                make_obs_tf=lambda name: U.Uint8Input(self.env.observation_space.shape, name=name),
                q_func=dueling_model if self.dueling else model,
                num_actions=self.env.action_space.n,
                epsilon=1.0 / 255.0,
                noisy=self.noisy,
            )
    return craft_adv_obs 
Example #24
Source File: east_multi_infer.py    From uai-sdk with Apache License 2.0
def _tower_fn(images):
    with tf.variable_scope(tf.get_variable_scope()):
        f_score, f_geometry = model.model(images, is_training=False)
    return f_score, f_geometry 
Example #25
Source File: process.py    From schedula with European Union Public License 1.1
def save_outputs(outputs, output_fpath):
    """
    Save model outputs in an Excel file.

    :param outputs:
        Model outputs.
    :type outputs: dict

    :param output_fpath:
        Output file path.
    :type output_fpath: str
    """
    df = pd.DataFrame(outputs)
    with pd.ExcelWriter(output_fpath) as writer:
        df.to_excel(writer) 
Example #26
Source File: main.py    From FewShot_GAN-Unet3D with MIT License
def main(_):
  # Create required directories
  if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)

  if not os.path.exists(FLAGS.results_dir):
    os.makedirs(FLAGS.results_dir)

  if not os.path.exists(FLAGS.best_checkpoint_dir):
    os.makedirs(FLAGS.best_checkpoint_dir)


  # To configure the GPU fraction
  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_frac)

  # Parameters of extracted training and testing patches
  patch_shape=(32,32,32)
  extraction_step=(8,8,8)
  testing_extraction_shape=(8,8,8)

  if FLAGS.training:
    # For training the network
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
      network = model(sess,patch_shape,extraction_step)
      network.build_model()
      network.train()
  if FLAGS.testing:
      # For testing the trained network
      test(patch_shape,testing_extraction_shape) 
Example #27
Source File: train.py    From pix2pix-flow with MIT License
def infer(sess, model, hps, iterators, its):
    from tqdm import tqdm
    assert hps.restore_path_A != ''
    assert hps.restore_path_B != ''

    xs_A, xs_B = [], []
    zs_A, zs_B = [], []
    for it in tqdm(range(its)):
        x_A, y_A = iterators['A']()
        x_B, y_B = iterators['B']()

        # A2B
        z_A = model.encode(x_A, y_A, 'model_A')
        x_B_recon = model.decode(y_B, z_A, 'model_B')
        xs_B.append(x_B_recon)
        zs_A.append(z_A)

        # B2A
        z_B = model.encode(x_B, y_B, 'model_B')
        x_A_recon = model.decode(y_A, z_B, 'model_A')
        xs_A.append(x_A_recon)
        zs_B.append(z_B)

    x_A = np.concatenate(xs_A, axis=0)
    z_A = np.concatenate(zs_A, axis=0)
    x_B = np.concatenate(xs_B, axis=0)
    z_B = np.concatenate(zs_B, axis=0)

    np.save(os.path.join(hps.logdir, 'z_A'), z_A)
    np.save(os.path.join(hps.logdir, 'z_B'), z_B)

    from utils import npy2img
    npy2img(os.path.join(hps.logdir, 'B2A'), x_A)
    npy2img(os.path.join(hps.logdir, 'A2B'), x_B)

    return x_A, z_A, x_B, z_B

# Get number of training and validation iterations 
Example #28
Source File: demo.py    From chinese_ocr with MIT License
def test_img3():
    """
    A single-line image needs resizing; set alph to 0.2 to adjust the anchors.
    :return:
    """
    detectAngle = False
    path = "test_images/line.jpg"

    img = cv2.imread(path)  ## BGR
    img2, f = letterbox_image(Image.fromarray(img), IMGSIZE)

    _, result, angle = model.model(img2,
                                   detectAngle=detectAngle,  ## whether to detect text orientation (controlled via a web parameter)
                                   config=dict(MAX_HORIZONTAL_GAP=50,  ## maximum gap between characters, used to merge text lines
                                               MIN_V_OVERLAPS=0.6,
                                               MIN_SIZE_SIM=0.6,
                                               TEXT_PROPOSALS_MIN_SCORE=0.1,
                                               TEXT_PROPOSALS_NMS_THRESH=0.3,
                                               TEXT_LINE_NMS_THRESH=0.7,  ## IoU threshold for NMS between text lines
                                               ),
                                   leftAdjust=True,  ## extend detected text lines to the left
                                   rightAdjust=True,  ## extend detected text lines to the right
                                   alph=0.2,  ## ratio by which detected text lines are extended left and right
                                   )
    #
    print(result, angle)
    # [{'cx': 280.5, 'cy': 26.5, 'text': '客店遒劲摊婕有力', 'w': 606.0, 'h': 50.0, 'degree': 0.10314419109384157}] 0 
Example #29
Source File: enjoy-adv.py    From cleverhans with MIT License
def craft_adv(self):
    with self.sess.as_default():
      with self.g.as_default():
        craft_adv_obs = deepq.build_adv(
            make_obs_tf=lambda name: U.Uint8Input(
                self.env.observation_space.shape, name=name),
            q_func=dueling_model if self.dueling else model,
            num_actions=self.env.action_space.n,
            epsilon=1.0 / 255.0,
            noisy=self.noisy,
        )
    return craft_adv_obs 
Example #30
Source File: enjoy-adv.py    From cleverhans with MIT License
def parse_args():
  parser = argparse.ArgumentParser("Run an already learned DQN model.")
  # Environment
  parser.add_argument("--env", type=str, required=True,
                      help="name of the game")
  parser.add_argument("--model-dir", type=str, default=None,
                      help="load model from this directory. ")
  parser.add_argument("--video", type=str, default=None,
                      help="Path to mp4 file where the \
                        video of first episode will be recorded.")
  boolean_flag(parser, "stochastic", default=True,
               help="whether or not to use stochastic \
                 actions according to models eps value")
  boolean_flag(parser, "dueling", default=False,
               help="whether or not to use dueling model")
  # V: Attack Arguments#
  parser.add_argument("--model-dir2", type=str, default=None,
                      help="load adversarial model from \
                        this directory (blackbox attacks). ")
  parser.add_argument("--attack", type=str, default=None,
                      help="Method to attack the model.")
  boolean_flag(parser, "noisy", default=False,
               help="whether or not to NoisyNetwork")
  boolean_flag(parser, "noisy2", default=False,
               help="whether or not to NoisyNetwork")
  boolean_flag(parser, "blackbox", default=False,
               help="whether or not to NoisyNetwork")

  return parser.parse_args()