Python utils.get_model_dir() Examples
The following are 9 code examples of utils.get_model_dir(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the module utils.
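Across these projects, get_model_dir() returns the directory where checkpoints and exported files for the current configuration are stored. The exact implementation differs per project (the yolo2-pytorch and openpose-pytorch examples derive it from a configparser config, while the TensorFlow examples derive it from a flags/option object), so the following is only a hypothetical minimal sketch of the config-based variant, with an assumed section and option name; it is not the code of any project listed here.

# Hypothetical sketch only; the real helpers in the projects below may differ.
import os

def get_model_dir(config, section='model', option='dir'):
    """Read a directory path from the config and expand ~ and $VARS."""
    path = config.get(section, option)  # assumed config layout
    return os.path.expanduser(os.path.expandvars(path))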
Example #1
Source File: variable_stat.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 6 votes |
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    mapper = [(inflection.underscore(name), member()) for name, member in inspect.getmembers(importlib.machinery.SourceFileLoader('', __file__).load_module()) if inspect.isclass(member)]
    path = os.path.join(model_dir, os.path.basename(os.path.splitext(__file__)[0])) + '.xlsx'
    with xlsxwriter.Workbook(path, {'strings_to_urls': False, 'nan_inf_to_errors': True}) as workbook:
        worksheet = workbook.add_worksheet(args.worksheet)
        for j, (key, m) in enumerate(mapper):
            worksheet.write(0, j, key)
            for i, (name, variable) in enumerate(state_dict.items()):
                value = m(name, variable)
                worksheet.write(1 + i, j, value)
            if hasattr(m, 'format'):
                m.format(workbook, worksheet, i, j)
        worksheet.autofilter(0, 0, i, len(mapper) - 1)
        worksheet.freeze_panes(1, 0)
    logging.info(path)
Example #2
Source File: convert_onnx_caffe2.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 5 votes |
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    model = onnx.load(model_dir + '.onnx')
    onnx.checker.check_model(model)
    init_net, predict_net = onnx_caffe2.backend.Caffe2Backend.onnx_graph_to_caffe2_net(model.graph, device='CPU')
    onnx_caffe2.helper.save_caffe2_net(init_net, os.path.join(model_dir, 'init_net.pb'))
    onnx_caffe2.helper.save_caffe2_net(predict_net, os.path.join(model_dir, 'predict_net.pb'), output_txt=True)
    logging.info(model_dir)
Example #3
Source File: checksum_caffe2.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 5 votes |
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    torch.manual_seed(args.seed)
    model_dir = utils.get_model_dir(config)
    init_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'init_net.pb'), 'rb') as f:
        init_net.ParseFromString(f.read())
    predict_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'predict_net.pb'), 'rb') as f:
        predict_net.ParseFromString(f.read())
    p = workspace.Predictor(init_net, predict_net)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(1, 3, height, width)
    # Checksum
    output = p.run([tensor.numpy()])
    for key, a in [
        ('tensor', tensor.cpu().numpy()),
        ('output', output[0]),
    ]:
        print('\t'.join(map(str, [key, a.shape, utils.abs_mean(a), hashlib.md5(a.tostring()).hexdigest()])))
Example #4
Source File: demo_graph.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 5 votes |
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        logging.warning('model cannot be loaded')
        state_dict = None
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), anchors, len(category))
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in dnn.state_dict().values())))
    if state_dict is not None:
        dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width))
    output = dnn(image)
    state_dict = dnn.state_dict()
    graph = utils.visualize.Graph(config, state_dict)
    graph(output.grad_fn)
    diff = [key for key in state_dict if key not in graph.drawn]
    if diff:
        logging.warning('variables not shown: ' + str(diff))
    path = graph.dot.view(os.path.basename(model_dir) + '.gv', os.path.dirname(model_dir))
    logging.info(path)
Example #5
Source File: checksum_torch.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 5 votes |
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    torch.manual_seed(args.seed)
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), anchors, len(category))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(1, 3, height, width)
    # Checksum
    for key, var in dnn.state_dict().items():
        a = var.cpu().numpy()
        print('\t'.join(map(str, [key, a.shape, utils.abs_mean(a), hashlib.md5(a.tostring()).hexdigest()])))
    output = dnn(torch.autograd.Variable(tensor, volatile=True)).data
    for key, a in [
        ('tensor', tensor.cpu().numpy()),
        ('output', output.cpu().numpy()),
    ]:
        print('\t'.join(map(str, [key, a.shape, utils.abs_mean(a), hashlib.md5(a.tostring()).hexdigest()])))
Example #6
Source File: pruner.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 5 votes |
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    _model = utils.parse_attr(config.get('model', 'dnn'))
    dnn = _model(model.ConfigChannels(config, state_dict), anchors, len(category))
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in dnn.state_dict().values())))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width))
    output = dnn(image)
    state_dict = dnn.state_dict()
    d = utils.dense(state_dict[args.name])
    keep = torch.LongTensor(np.argsort(d)[:int(len(d) * args.keep)])
    modifier = utils.channel.Modifier(
        args.name, state_dict, dnn,
        lambda name, var: var[keep],
        lambda name, var, mapper: var[mapper(keep, len(d))],
        debug=args.debug,
    )
    modifier(output.grad_fn)
    if args.debug:
        path = modifier.dot.view('%s.%s.gv' % (os.path.basename(model_dir), os.path.basename(os.path.splitext(__file__)[0])), os.path.dirname(model_dir))
        logging.info(path)
    assert len(keep) == len(state_dict[args.name])
    dnn = _model(model.ConfigChannels(config, state_dict), anchors, len(category))
    dnn.load_state_dict(state_dict)
    dnn(image)
    if not args.debug:
        torch.save(state_dict, path)
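Example #6 above prunes the channels of one named tensor (scoring them with the project's utils.dense helper and keeping a fraction given by args.keep) and then rebuilds and reloads the network. For readers who want the general idea without the project's utils.channel.Modifier machinery, here is a hypothetical, self-contained sketch of the common keep-the-largest-magnitude-channels variant on a plain nn.Conv2d; note it uses an L1-norm score rather than the example's utils.dense criterion.

# Standalone illustration of magnitude-based channel pruning; this is an
# assumption-laden sketch, not the Modifier used in yolo2-pytorch.
import torch
import torch.nn as nn

def prune_conv_channels(conv, keep_ratio=0.5):
    """Return a new Conv2d keeping the output channels with the largest L1 norm."""
    scores = conv.weight.detach().abs().sum(dim=(1, 2, 3))  # one score per output channel
    keep = torch.argsort(scores, descending=True)[:max(1, int(len(scores) * keep_ratio))]
    pruned = nn.Conv2d(conv.in_channels, len(keep), conv.kernel_size,
                       stride=conv.stride, padding=conv.padding,
                       bias=conv.bias is not None)
    pruned.weight.data = conv.weight.data[keep].clone()
    if conv.bias is not None:
        pruned.bias.data = conv.bias.data[keep].clone()
    return pruned

print(prune_conv_channels(nn.Conv2d(3, 16, 3, padding=1)))  # keeps 8 of 16 channels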
Example #7
Source File: main.py From gated-pixel-cnn with Apache License 2.0 | 5 votes |
def main(_):
    model_dir = util.get_model_dir(conf,
        ['data_dir', 'sample_dir', 'max_epoch', 'test_step', 'save_step',
         'is_train', 'random_seed', 'log_level', 'display', 'runtime_base_dir',
         'occlude_start_row', 'num_generated_images'])
    util.preprocess_conf(conf)
    validate_parameters(conf)

    data = 'mnist' if conf.data == 'color-mnist' else conf.data
    DATA_DIR = os.path.join(conf.runtime_base_dir, conf.data_dir, data)
    SAMPLE_DIR = os.path.join(conf.runtime_base_dir, conf.sample_dir, conf.data, model_dir)

    util.check_and_create_dir(DATA_DIR)
    util.check_and_create_dir(SAMPLE_DIR)

    dataset = get_dataset(DATA_DIR, conf.q_levels)

    with tf.Session() as sess:
        network = Network(sess, conf, dataset.height, dataset.width, dataset.channels)

        stat = Statistic(sess, conf.data, conf.runtime_base_dir, model_dir, tf.trainable_variables())
        stat.load_model()

        if conf.is_train:
            train(dataset, network, stat, SAMPLE_DIR)
        else:
            generate(network, dataset.height, dataset.width, SAMPLE_DIR)
Example #8
Source File: convert_onnx_caffe2.py From openpose-pytorch with GNU Lesser General Public License v3.0 | 5 votes |
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    model = onnx.load(model_dir + '.onnx')
    onnx.checker.check_model(model)
    init_net, predict_net = onnx_caffe2.backend.Caffe2Backend.onnx_graph_to_caffe2_net(model.graph, device='CPU')
    onnx_caffe2.helper.save_caffe2_net(init_net, os.path.join(model_dir, 'init_net.pb'))
    onnx_caffe2.helper.save_caffe2_net(predict_net, os.path.join(model_dir, 'predict_net.pb'), output_txt=True)
    logging.info(model_dir)
Example #9
Source File: main.py From NAF-tensorflow with MIT License | 4 votes |
def main(_):
    model_dir = get_model_dir(conf, ['is_train', 'random_seed', 'monitor', 'display', 'log_level'])
    preprocess_conf(conf)

    with tf.Session() as sess:
        # environment
        env = gym.make(conf.env_name)
        env.seed(conf.random_seed)

        assert isinstance(env.observation_space, gym.spaces.Box), \
            "observation space must be continuous"
        assert isinstance(env.action_space, gym.spaces.Box), \
            "action space must be continuous"

        # exploration strategy
        if conf.noise == 'ou':
            strategy = OUExploration(env, sigma=conf.noise_scale)
        elif conf.noise == 'brownian':
            strategy = BrownianExploration(env, conf.noise_scale)
        elif conf.noise == 'linear_decay':
            strategy = LinearDecayExploration(env)
        else:
            raise ValueError('Unkown exploration strategy: %s' % conf.noise)

        # networks
        shared_args = {
            'sess': sess,
            'input_shape': env.observation_space.shape,
            'action_size': env.action_space.shape[0],
            'hidden_dims': conf.hidden_dims,
            'use_batch_norm': conf.use_batch_norm,
            'use_seperate_networks': conf.use_seperate_networks,
            'hidden_w': conf.hidden_w,
            'action_w': conf.action_w,
            'hidden_fn': conf.hidden_fn,
            'action_fn': conf.action_fn,
            'w_reg': conf.w_reg,
        }

        logger.info("Creating prediction network...")
        pred_network = Network(
            scope='pred_network', **shared_args
        )

        logger.info("Creating target network...")
        target_network = Network(
            scope='target_network', **shared_args
        )
        target_network.make_soft_update_from(pred_network, conf.tau)

        # statistic
        stat = Statistic(sess, conf.env_name, model_dir, pred_network.variables, conf.update_repeat)

        agent = NAF(sess, env, strategy, pred_network, target_network, stat,
                    conf.discount, conf.batch_size, conf.learning_rate,
                    conf.max_steps, conf.update_repeat, conf.max_episodes)

        agent.run(conf.monitor, conf.display, conf.is_train)
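In Examples #7 and #9, get_model_dir takes the flags/option object conf plus a list of option names to leave out. A common pattern for such helpers, and presumably the intent here (the sketch below is an assumption, not the projects' actual code), is to encode the remaining option values into the directory name so that every hyper-parameter combination gets its own checkpoint and sample directory.

# Hypothetical flags-based variant; gated-pixel-cnn and NAF-tensorflow ship
# their own implementations, which may differ from this sketch.
import os

def get_model_dir(conf, exclude=()):
    """Build a per-configuration directory name from the options in conf."""
    # conf is assumed to behave like a simple namespace of options
    options = sorted((k, v) for k, v in vars(conf).items() if k not in exclude)
    name = '_'.join('%s-%s' % (k, v) for k, v in options)
    return os.path.join('checkpoints', name or 'default')

# e.g. get_model_dir(argparse.Namespace(env_name='Pendulum-v0', tau=0.001),
#                    exclude=('is_train',))
# -> 'checkpoints/env_name-Pendulum-v0_tau-0.001'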