Python util.init_logger() Examples

The following are 5 code examples of util.init_logger(), taken from open-source projects. You can go to the original project or source file by following the link above each example. Note that util here is each project's own utility module, not part of the Python standard library.
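Since the util modules below are project-local (e.g. seglink's pylib, mcsema's Binary Ninja tooling), a brief orientation may help. Below is a minimal sketch of an init_logger() that is consistent with the call sites in the examples; the signature, defaults, and log format are assumptions inferred from those call sites, not any project's actual implementation.

import logging
import os
import sys

def init_logger(log_file = 'log.log', log_path = '.', level = logging.DEBUG,
                mode = 'w', stdout = True):
    # Hypothetical sketch: signature and defaults are inferred from the
    # call sites in the examples below, not copied from any project.
    # Writes log records to log_path/log_file; mode 'a' appends to an
    # existing log, 'w' truncates it.
    fmt = '%(asctime)s %(levelname)s %(message)s'
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    logging.basicConfig(level = level, format = fmt,
                        filename = os.path.join(log_path, log_file),
                        filemode = mode)
    if stdout:
        # Optionally mirror log records to standard output as well.
        console = logging.StreamHandler(stream = sys.stdout)
        console.setFormatter(logging.Formatter(fmt))
        logging.getLogger('').addHandler(console)

With defaults like these, Example #2's bare util.init_logger() would log to ./log.log and echo to stdout, while Example #1 appends to a file under FLAGS.train_dir with stdout echoing disabled.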
Example #1
Source File: train_seglink.py    From seglink with GNU General Public License v3.0
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file = 'log_train_seglink_%d_%d.log'%image_shape, log_path = FLAGS.train_dir, stdout = False, mode = 'a')
    
    
    config.init_config(image_shape, 
                       batch_size = FLAGS.batch_size, 
                       weight_decay = FLAGS.weight_decay, 
                       num_gpus = FLAGS.num_gpus, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset 
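Note how the log filename is built: 'log_train_seglink_%d_%d.log' % image_shape expands the (height, width) tuple into the name, so e.g. a 512x512 training run logs to log_train_seglink_512_512.log. The file is opened in append mode ('a') under FLAGS.train_dir, with echoing to stdout disabled.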
Example #2
Source File: test_batch_and_gt.py    From seglink with GNU General Public License v3.0
def main(_):
    util.init_logger()
    dump_path = util.io.get_absolute_path('~/temp/no-use/seglink/')
    
    dataset = config_initialization()
    batch_queue = create_dataset_batch_queue(dataset)
    batch_size = config.batch_size
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        tf.train.start_queue_runners(sess)
        b_image, b_seg_label, b_seg_offsets, b_link_label = batch_queue.dequeue()
        batch_idx = 0
        while True: #batch_idx < 50:
            image_data_batch, seg_label_data_batch, seg_offsets_data_batch, link_label_data_batch = \
                            sess.run([b_image, b_seg_label, b_seg_offsets, b_link_label])
            for image_idx in xrange(batch_size):
                image_data = image_data_batch[image_idx, ...]
                seg_label_data = seg_label_data_batch[image_idx, ...]
                seg_offsets_data = seg_offsets_data_batch[image_idx, ...]
                link_label_data = link_label_data_batch[image_idx, ...]
                
                # add back the per-channel mean subtracted during preprocessing,
                # then convert to uint8 for visualization
                image_data = image_data + [123, 117, 104]
                image_data = np.asarray(image_data, dtype = np.uint8)
                
                # decode the encoded ground truth back to bboxes
                bboxes = seglink.seglink_to_bbox(seg_scores = seg_label_data, 
                                                 link_scores = link_label_data, 
                                                 seg_offsets_pred = seg_offsets_data)
                
                # draw bboxes on the image
                for bbox_idx in xrange(len(bboxes)):
                    bbox = bboxes[bbox_idx, :] 
                    draw_bbox(image_data, bbox)
                
                image_path = util.io.join_path(dump_path, '%d_%d.jpg'%(batch_idx, image_idx))
                util.plt.imwrite(image_path, image_data)
                print 'Make sure that the text on the image is correctly bounded with oriented boxes:', image_path
            batch_idx += 1 
Example #3
Source File: train_pixel_link.py    From pixel_link with MIT License
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(
        log_file = 'log_train_pixel_link_%d_%d.log'%image_shape, 
        log_path = FLAGS.train_dir, stdout = False, mode = 'a')
    
    
    config.load_config(FLAGS.train_dir)
            
    config.init_config(image_shape, 
                       batch_size = FLAGS.batch_size, 
                       weight_decay = FLAGS.weight_decay, 
                       num_gpus = FLAGS.num_gpus)

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name('train_pixel_link_on' + '_' + FLAGS.dataset_name)
    
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset 
Example #4
Source File: train_pixel_link.py    From HUAWEIOCR-2019 with MIT License
The code of this example is byte-for-byte identical to Example #3 above (HUAWEIOCR-2019 ships the same train_pixel_link.py), so it is not repeated here.
Example #5
Source File: cfg.py    From mcsema with Apache License 2.0
def get_cfg(args, fixed_args):
  # Parse any additional args
  parser = argparse.ArgumentParser()

  parser.add_argument(
      '--recover-stack-vars',
      help='Flag to enable stack variable recovery',
      default=False,
      action='store_true')

  parser.add_argument(
      "--std-defs",
      action='append',
      type=str,
      default=[],
      help="std_defs file: definitions and calling conventions of imported functions and data")

  extra_args = parser.parse_args(fixed_args)

  if extra_args.recover_stack_vars:
    RECOVER_OPTS['stack_vars'] = True

  # Setup logger
  util.init_logger(args.log_file)

  # Load the binary in binja
  bv = util.load_binary(args.binary)

  # Once for good measure.
  bv.add_analysis_option("linearsweep")
  bv.update_analysis_and_wait()

  # Twice for good luck!
  bv.add_analysis_option("linearsweep")
  bv.update_analysis_and_wait()

  # Collect all paths to defs files
  log.debug('Parsing definitions files')
  def_paths = set(map(os.path.abspath, extra_args.std_defs))
  def_paths.add(os.path.join(DISASS_DIR, 'defs', '{}.txt'.format(args.os)))  # default defs file

  # Parse all of the defs files
  for fpath in def_paths:
    if os.path.isfile(fpath):
      parse_defs_file(bv, fpath)
    else:
      log.warn('%s is not a file', fpath)

  # Recover module
  log.debug('Starting analysis')
  pb_mod = recover_cfg(bv, args)

  # Save cfg
  log.debug('Saving to file: %s', args.output)
  with open(args.output, 'wb') as f:
    f.write(pb_mod.SerializeToString())

  return 0
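Unlike the keyword-argument calls in Examples #1 and #3, this example passes the log file path positionally (util.init_logger(args.log_file)) and relies on defaults for everything else. The logger is initialized before any Binary Ninja analysis runs, presumably so that the subsequent log.debug() and log.warn() calls end up in args.log_file.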