Python models.build_model() Examples
The following are 8 Python code examples of models.build_model(). You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the models module, or try the search function.
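Across these examples, build_model() follows a common pattern: it turns a project-specific configuration into a model instance, which the caller then moves to a device and hands to training or evaluation code. The sketch below illustrates that pattern only; cfg is a placeholder for whatever configuration object a given project's build_model() actually expects.

import torch
from models import build_model

# `cfg` is a placeholder configuration object; each project defines its own.
model = build_model(cfg)
# Move the model to a GPU when one is available, otherwise run on the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# From here the model is passed to an optimizer, trainer, or evaluation loop.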
Example #1
Source File: train_val.py From brats_segmentation-pytorch with MIT License | 7 votes |
def main():
    init_env('1')
    loaders = make_data_loaders(cfg)
    model = build_model(cfg)
    model = model.cuda()
    task_name = 'base_unet'
    log_dir = os.path.join(cfg.LOG_DIR, task_name)
    cfg.TASK_NAME = task_name
    mkdir(log_dir)
    logger = setup_logger('train', log_dir, filename='train.log')
    logger.info(cfg)
    logger = setup_logger('eval', log_dir, filename='eval.log')
    optimizer, scheduler = make_optimizer(cfg, model)
    metrics = get_metrics(cfg)
    losses = get_losses(cfg)
    train_val(model, loaders, optimizer, scheduler, losses, metrics)
Example #2
Source File: main.py From nmn2 with Apache License 2.0 | 6 votes |
def main():
    config = configure()
    task = tasks.load_task(config)
    model = models.build_model(config.model, config.opt)
    for i_epoch in range(config.opt.iters):
        train_loss, train_acc, _ = \
            do_iter(task.train, model, config, train=True)
        val_loss, val_acc, val_predictions = \
            do_iter(task.val, model, config, vis=True)
        test_loss, test_acc, test_predictions = \
            do_iter(task.test, model, config)
        logging.info(
            "%5d | %8.3f %8.3f %8.3f | %8.3f %8.3f %8.3f",
            i_epoch, train_loss, val_loss, test_loss,
            train_acc, val_acc, test_acc)
        with open("logs/val_predictions_%d.json" % i_epoch, "w") as pred_f:
            # Python 2-style print redirection (the nmn2 project targets Python 2).
            print >>pred_f, json.dumps(val_predictions)
        #with open("logs/test_predictions_%d.json" % i_epoch, "w") as pred_f:
        #    print >>pred_f, json.dumps(test_predictions)
Example #3
Source File: search.py From elbow with BSD 3-Clause "New" or "Revised" License | 6 votes |
def main():
    N = 50
    D = 20

    settings = ExperimentSettings()
    settings.max_rank = 2
    settings.gaussian_auto_ard = False
    settings.constant_gaussian_std = 1.0
    settings.constant_noise_std = 0.1

    #X = np.float32(np.random.randn(N, D))
    m = build_model(('lowrank', ('chain', 'g'), 'g'), (N, D), settings)
    #m = build_model(('chain', 'g'), (N, D), settings)
    X = m.sample()
    #X /= np.std(X)

    best_structure = do_structure_search(X, settings)
Example #4
Source File: eval.py From DBNet.pytorch with Apache License 2.0 | 6 votes |
def __init__(self, model_path, gpu_id=0):
    from models import build_model
    from data_loader import get_dataloader
    from post_processing import get_post_processing
    from utils import get_metric
    self.gpu_id = gpu_id
    if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
        self.device = torch.device("cuda:%s" % self.gpu_id)
        torch.backends.cudnn.benchmark = True
    else:
        self.device = torch.device("cpu")
    checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
    config = checkpoint['config']
    config['arch']['backbone']['pretrained'] = False
    self.validate_loader = get_dataloader(config['dataset']['validate'], config['distributed'])
    self.model = build_model(config['arch'].pop('type'), **config['arch'])
    self.model.load_state_dict(checkpoint['state_dict'])
    self.model.to(self.device)
    self.post_process = get_post_processing(config['post_processing'])
    self.metric_cls = get_metric(config['metric'])
Example #5
Source File: search.py From elbow with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_model(structure, X, settings):
    N, D = X.shape
    m = build_model(structure, (N, D), settings)
    m.observe(X)
    jm = Model(m)
    jm.train(silent=True, stopping_rule=settings.stopping_rule, adam_rate=settings.adam_rate)
    score = jm.monte_carlo_elbo(n_samples=settings.n_elbo_samples)
    return score
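As a hypothetical illustration (not code from the elbow project), score_model() can be mapped over a few candidate structures, reusing the structure tuples from Example #3, to keep the one with the best ELBO:

# Hypothetical usage sketch: `X` and `settings` are assumed to be prepared as in
# Examples #3 and #5; the structure tuples follow Example #3.
candidates = [('chain', 'g'), ('lowrank', ('chain', 'g'), 'g')]
scores = {structure: score_model(structure, X, settings) for structure in candidates}
best_structure = max(scores, key=scores.get)  # structure with the highest ELBO estimate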
Example #6
Source File: run.py From pynlp with MIT License | 5 votes |
def train(config):
    # load train data
    print("start load data")
    train_data_df = load_data_from_csv(os.path.join(config.data_dir, config.file_names[0]))
    validate_data_df = load_data_from_csv(os.path.join(config.data_dir, config.file_names[1]))
    # explore data
    print("explore train data!")
    explore_data_analysis(train_data_df)
    print("explore dev data!")
    explore_data_analysis(validate_data_df)

    content_train = train_data_df.iloc[:, 0]
    content_val = validate_data_df.iloc[:, 0]
    if config.write_vocab:
        write_vocab(content_train, os.path.join(config.data_dir, config.file_prefix + 'vocab.data'), min_count=5)

    print("start convert str2id!")
    word2id = load_vocab(os.path.join(config.data_dir, config.file_prefix + 'vocab.data'))
    train_data = list(map(lambda x: string2id(x, word2id), content_train))
    print("length of train_data", len(train_data))
    val_data = list(map(lambda x: string2id(x, word2id), content_val))

    print("create experiment dir")
    config = prepare_experiment(config, len(word2id), len(train_data_df))
    set_logger(config)

    train_label = train_data_df.iloc[:, 1]
    val_label = validate_data_df.iloc[:, 1]
    train_set = DataSet(config.batch_size, train_data, train_label, config.sequence_length)
    dev_set = DataSet(config.batch_size, val_data, val_label, config.sequence_length)

    print("-----start train model------")
    model = build_model(config)
    train_module(model, config, train_set, dev_set)
    print("finish train %s model")
Example #7
Source File: predict.py From DBNet.pytorch with Apache License 2.0 | 5 votes |
def __init__(self, model_path, post_p_thre=0.7, gpu_id=None):
    '''
    Initialize the PyTorch model.
    :param model_path: path to the model file (either the model parameters alone, or the parameters saved together with the computation graph)
    :param gpu_id: index of the GPU to run on
    '''
    self.gpu_id = gpu_id
    if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
        self.device = torch.device("cuda:%s" % self.gpu_id)
    else:
        self.device = torch.device("cpu")
    print('device:', self.device)

    checkpoint = torch.load(model_path, map_location=self.device)
    config = checkpoint['config']
    config['arch']['backbone']['pretrained'] = False
    self.model = build_model(config['arch'].pop('type'), **config['arch'])

    self.post_process = get_post_processing(config['post_processing'])
    self.post_process.box_thresh = post_p_thre
    self.img_mode = config['dataset']['train']['dataset']['args']['img_mode']
    self.model.load_state_dict(checkpoint['state_dict'])
    self.model.to(self.device)
    self.model.eval()

    self.transform = []
    for t in config['dataset']['train']['dataset']['args']['transforms']:
        if t['type'] in ['ToTensor', 'Normalize']:
            self.transform.append(t)
    self.transform = get_transforms(self.transform)
Example #8
Source File: train.py From DBNet.pytorch with Apache License 2.0 | 5 votes |
def main(config):
    import torch
    from models import build_model, build_loss
    from data_loader import get_dataloader
    from trainer import Trainer
    from post_processing import get_post_processing
    from utils import get_metric
    if torch.cuda.device_count() > 1:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://",
                                             world_size=torch.cuda.device_count(), rank=args.local_rank)
        config['distributed'] = True
    else:
        config['distributed'] = False
    config['local_rank'] = args.local_rank

    train_loader = get_dataloader(config['dataset']['train'], config['distributed'])
    assert train_loader is not None
    if 'validate' in config['dataset']:
        validate_loader = get_dataloader(config['dataset']['validate'], False)
    else:
        validate_loader = None

    criterion = build_loss(config['loss']).cuda()
    config['arch']['backbone']['in_channels'] = 3 if config['dataset']['train']['dataset']['args']['img_mode'] != 'GRAY' else 1
    model = build_model(config['arch'])
    post_p = get_post_processing(config['post_processing'])
    metric = get_metric(config['metric'])

    trainer = Trainer(config=config,
                      model=model,
                      criterion=criterion,
                      train_loader=train_loader,
                      post_process=post_p,
                      metric_cls=metric,
                      validate_loader=validate_loader)
    trainer.train()
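Note that main() reads args.local_rank from the enclosing module, so the script is meant to be started once per GPU by a launcher such as torch.distributed.launch, which passes --local_rank to each spawned process. A minimal, hypothetical argument-parsing stub that would satisfy this dependency (not the project's actual argument handling) could look like:

# Hypothetical stub, shown only to make the dependency on `args.local_rank` explicit.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=0)  # filled in by torch.distributed.launch
args = parser.parse_args()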