Python utils.print_network() Examples
The following are 10 code examples of utils.print_network(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
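The examples below only call utils.print_network(); the helper itself is defined in each project's own utils module and is not shown here. As a rough reference, here is a minimal sketch of such a helper, assuming the common pattern of printing the module structure and its total parameter count. This is an illustrative implementation, not the one from any of the projects below; the real helpers differ in signature (for example, GCA-Matting also passes a version string) and in output format.

import torch.nn as nn

def print_network(net: nn.Module, name: str = ""):
    """Sketch only: print a model's layer structure and total parameter count."""
    num_params = sum(p.numel() for p in net.parameters())
    if name:
        print(name)
    print(net)  # nn.Module's repr lists the layers
    print('Total number of parameters: %d' % num_params)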
Example #1
Source File: tester.py From GCA-Matting with MIT License | 6 votes |
def __init__(self, test_dataloader):
    self.test_dataloader = test_dataloader
    self.logger = logging.getLogger("Logger")

    self.model_config = CONFIG.model
    self.test_config = CONFIG.test
    self.log_config = CONFIG.log
    self.data_config = CONFIG.data

    self.build_model()
    self.resume_step = None

    utils.print_network(self.G, CONFIG.version)

    if self.test_config.checkpoint:
        self.logger.info('Resume checkpoint: {}'.format(self.test_config.checkpoint))
        self.restore_model(self.test_config.checkpoint)
Example #2
Source File: trainer.py From GCA-Matting with MIT License | 4 votes |
def __init__(self, train_dataloader, test_dataloader, logger, tb_logger):
    # Save GPU memory.
    cudnn.benchmark = False

    self.train_dataloader = train_dataloader
    self.test_dataloader = test_dataloader
    self.logger = logger
    self.tb_logger = tb_logger

    self.model_config = CONFIG.model
    self.train_config = CONFIG.train
    self.log_config = CONFIG.log

    self.loss_dict = {'rec': None, 'comp': None, 'smooth_l1': None, 'grad': None, 'gabor': None}
    self.test_loss_dict = {'rec': None, 'smooth_l1': None, 'mse': None, 'sad': None, 'grad': None, 'gabor': None}

    self.grad_filter = torch.tensor(utils.get_gradfilter()).cuda()
    self.gabor_filter = torch.tensor(utils.get_gaborfilter(16)).cuda()

    self.build_model()
    self.resume_step = None
    self.best_loss = 1e+8

    utils.print_network(self.G, CONFIG.version)

    if self.train_config.resume_checkpoint:
        self.logger.info('Resume checkpoint: {}'.format(self.train_config.resume_checkpoint))
        self.restore_model(self.train_config.resume_checkpoint)

    if self.model_config.imagenet_pretrain and self.train_config.resume_checkpoint is None:
        self.logger.info('Load Imagenet Pretrained: {}'.format(self.model_config.imagenet_pretrain_path))
        if self.model_config.arch.encoder == "vgg_encoder":
            utils.load_VGG_pretrain(self.G, self.model_config.imagenet_pretrain_path)
        else:
            utils.load_imagenet_pretrain(self.G, self.model_config.imagenet_pretrain_path)
Example #3
Source File: EBGAN.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 64
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type

    # EBGAN parameters
    self.pt_loss_weight = 0.1
    self.margin = max(1, self.batch_size / 64.)  # margin for loss function
    # usually a margin of 1 is enough, but for large batch size it must be larger than 1

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()
        self.MSE_loss = nn.MSELoss().cuda()
    else:
        self.MSE_loss = nn.MSELoss()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load dataset
    if self.dataset == 'mnist':
        self.data_loader = DataLoader(
            datasets.MNIST('data/mnist', train=True, download=True,
                           transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'fashion-mnist':
        self.data_loader = DataLoader(
            datasets.FashionMNIST('data/fashion-mnist', train=True, download=True,
                                  transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'celebA':
        self.data_loader = utils.load_celebA(
            'data/celebA',
            transform=transforms.Compose([transforms.CenterCrop(160),
                                          transforms.Scale(64),
                                          transforms.ToTensor()]),
            batch_size=self.batch_size, shuffle=True)
    self.z_dim = 62

    # fixed noise
    if self.gpu_mode:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
    else:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
Example #4
Source File: CGAN.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 100
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()
        self.BCE_loss = nn.BCELoss().cuda()
    else:
        self.BCE_loss = nn.BCELoss()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load mnist
    self.data_X, self.data_Y = utils.load_mnist(args.dataset)
    self.z_dim = 62
    self.y_dim = 10

    # fixed noise & condition
    self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
    for i in range(10):
        self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
        for j in range(1, self.y_dim):
            self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

    temp = torch.zeros((10, 1))
    for i in range(self.y_dim):
        temp[i, 0] = i

    temp_y = torch.zeros((self.sample_num, 1))
    for i in range(10):
        temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

    self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
    self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)

    if self.gpu_mode:
        self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), \
                                         Variable(self.sample_y_.cuda(), volatile=True)
    else:
        self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), \
                                         Variable(self.sample_y_, volatile=True)
Example #5
Source File: WGAN.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 64
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type
    self.c = 0.01  # clipping value
    self.n_critic = 5  # the number of iterations of the critic per generator iteration

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load dataset
    if self.dataset == 'mnist':
        self.data_loader = DataLoader(
            datasets.MNIST('data/mnist', train=True, download=True,
                           transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'fashion-mnist':
        self.data_loader = DataLoader(
            datasets.FashionMNIST('data/fashion-mnist', train=True, download=True,
                                  transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'celebA':
        self.data_loader = utils.load_celebA(
            'data/celebA',
            transform=transforms.Compose([transforms.CenterCrop(160),
                                          transforms.Scale(64),
                                          transforms.ToTensor()]),
            batch_size=self.batch_size, shuffle=True)
    self.z_dim = 62

    # fixed noise
    if self.gpu_mode:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
    else:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
Example #6
Source File: LSGAN.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 64
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()
        self.MSE_loss = nn.MSELoss().cuda()
    else:
        self.MSE_loss = nn.MSELoss()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load dataset
    if self.dataset == 'mnist':
        self.data_loader = DataLoader(
            datasets.MNIST('data/mnist', train=True, download=True,
                           transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'fashion-mnist':
        self.data_loader = DataLoader(
            datasets.FashionMNIST('data/fashion-mnist', train=True, download=True,
                                  transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'celebA':
        self.data_loader = utils.load_celebA(
            'data/celebA',
            transform=transforms.Compose([transforms.CenterCrop(160),
                                          transforms.Scale(64),
                                          transforms.ToTensor()]),
            batch_size=self.batch_size, shuffle=True)
    self.z_dim = 62

    # fixed noise
    if self.gpu_mode:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
    else:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
Example #7
Source File: GAN.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 16
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()
        self.BCE_loss = nn.BCELoss().cuda()
    else:
        self.BCE_loss = nn.BCELoss()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load dataset
    if self.dataset == 'mnist':
        self.data_loader = DataLoader(
            datasets.MNIST('data/mnist', train=True, download=True,
                           transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'fashion-mnist':
        self.data_loader = DataLoader(
            datasets.FashionMNIST('data/fashion-mnist', train=True, download=True,
                                  transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'celebA':
        self.data_loader = utils.load_celebA(
            'data/celebA',
            transform=transforms.Compose([transforms.CenterCrop(160),
                                          transforms.Scale(64),
                                          transforms.ToTensor()]),
            batch_size=self.batch_size, shuffle=True)
    self.z_dim = 62

    # fixed noise
    if self.gpu_mode:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
    else:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
Example #8
Source File: DRAGAN.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 64
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type
    self.lambda_ = 0.25

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()
        self.BCE_loss = nn.BCELoss().cuda()
    else:
        self.BCE_loss = nn.BCELoss()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load dataset
    if self.dataset == 'mnist':
        self.data_loader = DataLoader(
            datasets.MNIST('data/mnist', train=True, download=True,
                           transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'fashion-mnist':
        self.data_loader = DataLoader(
            datasets.FashionMNIST('data/fashion-mnist', train=True, download=True,
                                  transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'celebA':
        self.data_loader = utils.load_celebA(
            'data/celebA',
            transform=transforms.Compose([transforms.CenterCrop(160),
                                          transforms.Scale(64),
                                          transforms.ToTensor()]),
            batch_size=self.batch_size, shuffle=True)
    self.z_dim = 62

    # fixed noise
    if self.gpu_mode:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
    else:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
Example #9
Source File: ACGAN.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 100
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()
        self.BCE_loss = nn.BCELoss().cuda()
        self.CE_loss = nn.CrossEntropyLoss().cuda()
    else:
        self.BCE_loss = nn.BCELoss()
        self.CE_loss = nn.CrossEntropyLoss()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load mnist
    self.data_X, self.data_Y = utils.load_mnist(args.dataset)
    self.z_dim = 62
    self.y_dim = 10

    # fixed noise & condition
    self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
    for i in range(10):
        self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
        for j in range(1, self.y_dim):
            self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

    temp = torch.zeros((10, 1))
    for i in range(self.y_dim):
        temp[i, 0] = i

    temp_y = torch.zeros((self.sample_num, 1))
    for i in range(10):
        temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

    self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
    self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)

    if self.gpu_mode:
        self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), \
                                         Variable(self.sample_y_.cuda(), volatile=True)
    else:
        self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), \
                                         Variable(self.sample_y_, volatile=True)
Example #10
Source File: WGAN_GP.py From Generative_Model_Zoo with MIT License | 4 votes |
def __init__(self, args):
    # parameters
    self.epoch = args.epoch
    self.sample_num = 64
    self.batch_size = args.batch_size
    self.save_dir = args.save_dir
    self.result_dir = args.result_dir
    self.dataset = args.dataset
    self.log_dir = args.log_dir
    self.gpu_mode = args.gpu_mode
    self.model_name = args.gan_type
    self.lambda_ = 0.25
    self.n_critic = 5  # the number of iterations of the critic per generator iteration

    # networks init
    self.G = generator(self.dataset)
    self.D = discriminator(self.dataset)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

    if self.gpu_mode:
        self.G.cuda()
        self.D.cuda()

    print('---------- Networks architecture -------------')
    utils.print_network(self.G)
    utils.print_network(self.D)
    print('-----------------------------------------------')

    # load dataset
    if self.dataset == 'mnist':
        self.data_loader = DataLoader(
            datasets.MNIST('data/mnist', train=True, download=True,
                           transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'fashion-mnist':
        self.data_loader = DataLoader(
            datasets.FashionMNIST('data/fashion-mnist', train=True, download=True,
                                  transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=self.batch_size, shuffle=True)
    elif self.dataset == 'celebA':
        self.data_loader = utils.load_celebA(
            'data/celebA',
            transform=transforms.Compose([transforms.CenterCrop(160),
                                          transforms.Scale(64),
                                          transforms.ToTensor()]),
            batch_size=self.batch_size, shuffle=True)
    self.z_dim = 62

    # fixed noise
    if self.gpu_mode:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
    else:
        self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
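Note that the Generative_Model_Zoo examples above target an old PyTorch/torchvision release: Variable(..., volatile=True) was removed in PyTorch 0.4 in favor of torch.no_grad(), and transforms.Scale was renamed to transforms.Resize. As a hedged sketch only (assuming a current PyTorch and torchvision, with the same batch_size and z_dim values used above), the fixed-noise and celebA-transform setup would look roughly like this:

import torch
from torchvision import transforms

batch_size, z_dim = 64, 62  # values used in the examples above

# Modern equivalent of Variable(torch.rand(...), volatile=True):
# use tensors directly and run inference-only code under no_grad().
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
    sample_z_ = torch.rand((batch_size, z_dim), device=device)

# transforms.Scale(64) is spelled transforms.Resize(64) in current torchvision.
celebA_transform = transforms.Compose([
    transforms.CenterCrop(160),
    transforms.Resize(64),
    transforms.ToTensor(),
])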