Python chainer.serializers.load_npz() Examples
The following are 30 code examples of chainer.serializers.load_npz().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
chainer.serializers, or try the search function.
Example #1
Source File: chainer_alex.py From mlimages with MIT License | 6 votes |
def predict(limit):
    """Predict labels for up to ``limit`` images (5 when limit <= 0).

    Loads the trained AlexNet, runs each generated image through it,
    prints predicted vs. actual labels and displays the image.
    """
    shown_max = limit if limit > 0 else 5
    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT,
                      mean_image_file=MEAN_IMAGE_FILE,
                      image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)
    for count, (arr, im) in enumerate(td.generate(), start=1):
        batch = np.ndarray((1,) + arr.shape, arr.dtype)
        batch[0] = arr
        # volatile="on" skips graph construction (Chainer v1 inference API).
        v = chainer.Variable(np.asarray(batch), volatile="on")
        y = model.predict(v)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        if count >= shown_max:
            break
Example #2
Source File: fasterRCNN.py From deel with MIT License | 6 votes |
def __init__(self, modelpath='misc/VGG16_faster_rcnn_final.model',
             mean=[102.9801, 115.9465, 122.7717], in_size=224):
    """Set up a Faster R-CNN wrapper with pre-trained weights.

    Args:
        modelpath: path to the serialized Faster R-CNN model.
        mean: per-channel BGR mean used to fill the mean image.
              (NOTE(review): mutable default; safe here because it is
              never mutated, but a tuple default would be cleaner.)
        in_size: network input edge length in pixels.
    """
    super(FasterRCNN, self).__init__('FasterRCNN', in_size)
    self.func = FRCNN(Deel.gpu)
    self.func.train = False
    # BUG FIX: honor the `modelpath` argument. The original always loaded
    # the hard-coded 'misc/VGG16_faster_rcnn_final.model', silently
    # ignoring any path passed by the caller.
    serializers.load_npz(modelpath, self.func)
    ImageNet.mean_image = np.ndarray((3, 256, 256), dtype=np.float32)
    ImageNet.mean_image[0] = mean[0]
    ImageNet.mean_image[1] = mean[1]
    ImageNet.mean_image[2] = mean[2]
    ImageNet.in_size = in_size
    self.labels = CLASSES
    self.batchsize = 1
    xp = Deel.xp
    self.x_batch = xp.ndarray(
        (self.batchsize, 3, self.in_size, self.in_size), dtype=np.float32)
    if Deel.gpu >= 0:
        self.func = self.func.to_gpu(Deel.gpu)
    self.optimizer = optimizers.Adam()
    self.optimizer.setup(self.func)
Example #3
Source File: train.py From deeppose with GNU General Public License v2.0 | 6 votes |
def get_model(model_path, n_joints, result_dir, resume_model):
    """Load a model class from a Python source file and build an instance.

    The model definition file is also copied into ``result_dir`` so the
    experiment is reproducible, and weights are restored when
    ``resume_model`` is given.
    """
    basename = os.path.basename(model_path)
    class_name = basename.split('.')[0]
    module = imp.load_source(class_name, model_path)
    model_cls = getattr(module, class_name)

    # Initialize
    model = model_cls(n_joints)

    # Keep a copy of the model definition next to the results.
    dst = '{}/{}'.format(result_dir, basename)
    if not os.path.exists(dst):
        shutil.copy(model_path, dst)

    # Restore pre-trained weights when resuming.
    if resume_model is not None:
        serializers.load_npz(resume_model, model)
    return model
Example #4
Source File: test_once_trigger.py From chainer with MIT License | 6 votes |
def test_resumed_trigger_backward_compat(self):
    """Resuming from an old-format snapshot must warn but keep working."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)

    def drive(trig, lo, hi):
        # Advance the updater and verify both the finished flag and
        # the trigger result against the recorded expectations.
        for want, done in zip(self.resumed_expected[lo:hi],
                              self.resumed_finished[lo:hi]):
            trainer.updater.update()
            self.assertEqual(trig.finished, done)
            self.assertEqual(trig(trainer), want)

    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        drive(trigger, None, self.resume)
        # old version does not save anything
        np.savez(f, dummy=0)
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        with testing.assert_warns(UserWarning):
            serializers.load_npz(f.name, trigger)
        drive(trigger, self.resume, None)
Example #5
Source File: test_once_trigger.py From chainer with MIT License | 6 votes |
def test_resumed_trigger(self):
    """OnceTrigger state must survive an npz save/load round trip."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)

    def drive(trig, lo, hi):
        for want, done in zip(self.resumed_expected[lo:hi],
                              self.resumed_finished[lo:hi]):
            trainer.updater.update()
            self.assertEqual(trig.finished, done)
            self.assertEqual(trig(trainer), want)

    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        drive(trigger, None, self.resume)
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        serializers.load_npz(f.name, trigger)
        drive(trigger, self.resume, None)
Example #6
Source File: test_interval_trigger.py From chainer with MIT License | 6 votes |
def test_resumed_trigger_sparse_call(self):
    """IntervalTrigger must survive save/load when queried only intermittently."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
    accumulated = False

    def drive(trig, chunk):
        # Only check the trigger on a random subset of iterations; any
        # expected firings in between are OR-folded into `accumulated`.
        nonlocal accumulated
        for fired in chunk:
            trainer.updater.update()
            accumulated = accumulated or fired
            if random.randrange(2):
                self.assertEqual(trig(trainer), accumulated)
                accumulated = False

    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.IntervalTrigger(*self.interval)
        drive(trigger, self.expected[:self.resume])
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.IntervalTrigger(*self.interval)
        serializers.load_npz(f.name, trigger)
        drive(trigger, self.expected[self.resume:])
Example #7
Source File: test_manual_schedule_trigger.py From chainer with MIT License | 6 votes |
def test_resumed_trigger_backward_compat(self):
    """ManualScheduleTrigger must warn but still work on an old-format snapshot."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)

    def drive(trig, lo, hi):
        # Note the check order: trigger result first, then finished flag.
        for want, done in zip(self.expected[lo:hi], self.finished[lo:hi]):
            trainer.updater.update()
            self.assertEqual(trig(trainer), want)
            self.assertEqual(trig.finished, done)

    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        drive(trigger, None, self.resume)
        # old version does not save anything
        np.savez(f, dummy=0)
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        with testing.assert_warns(UserWarning):
            serializers.load_npz(f.name, trigger)
        drive(trigger, self.resume, None)
Example #8
Source File: test_manual_schedule_trigger.py From chainer with MIT License | 6 votes |
def test_resumed_trigger(self):
    """ManualScheduleTrigger state must survive an npz save/load round trip."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)

    def drive(trig, lo, hi):
        for want, done in zip(self.expected[lo:hi], self.finished[lo:hi]):
            trainer.updater.update()
            self.assertEqual(trig(trainer), want)
            self.assertEqual(trig.finished, done)

    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        drive(trigger, None, self.resume)
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        serializers.load_npz(f.name, trigger)
        drive(trigger, self.resume, None)
Example #9
Source File: cnn_train.py From cgp-cnn with MIT License | 6 votes |
def test(self, cgp, model_file, comp_graph='comp_graph.dot', batchsize=256):
    """Evaluate a trained CGP-defined CNN on the test set.

    Loads weights from ``model_file``, runs evaluation on GPU 0, prints
    summary statistics, and optionally dumps the computational graph.
    Returns (test_accuracy, test_loss) totals.
    """
    chainer.cuda.get_device(0).use()  # Make a specified GPU current
    model = CGP2CNN(cgp, self.n_class)
    print('\tLoad model from', model_file)
    serializers.load_npz(model_file, model)
    model.to_gpu(0)
    accuracy, loss = self.__test(model, batchsize)
    print('\tparamNum={}'.format(model.param_num))
    print('\ttest mean loss={}, test accuracy={}'.format(
        loss / self.test_data_num, accuracy / self.test_data_num))
    if comp_graph is not None:
        # Dump the graph rooted at the last loss for offline inspection.
        with open(comp_graph, 'w') as o:
            g = computational_graph.build_computational_graph((model.loss,))
            o.write(g.dump())
            del g
        print('\tCNN graph generated ({}).'.format(comp_graph))
    return accuracy, loss
Example #10
Source File: train.py From knmt with GNU General Public License v3.0 | 6 votes |
def load_model_flexible(filename_list, encdec):
    """Load encdec parameters from a model file, a trainer snapshot, or an
    average over several model files.

    A single filename (or one-element list) is loaded directly; a longer
    list is averaged parameter-wise.
    """
    mode = "normal"
    if isinstance(filename_list, (tuple, list)):
        if len(filename_list) == 1:
            filename_list = filename_list[0]
        else:
            mode = "average"

    if mode == "normal":
        log.info("loading model parameters from %s", filename_list)
        try:
            serializers.load_npz(filename_list, encdec)
        except KeyError:
            # Not a bare model file; retry as a trainer snapshot by
            # deserializing from the updater's model path.
            log.info("not model format, trying snapshot format")
            with np.load(filename_list) as fseri:
                dicseri = serializers.NpzDeserializer(
                    fseri, path="updater/model:main/")
                dicseri.load(encdec)
    else:
        assert mode == "average"
        log.info("loading averaged model parameters from %r", filename_list)
        dseri = NpzDeserializerAverage(
            [np.load(fn) for fn in filename_list])
        dseri.load(encdec)
Example #11
Source File: mnist.py From sagemaker-python-sdk with Apache License 2.0 | 5 votes |
def model_fn(model_dir):
    """SageMaker hook: deserialize the trained MLP and return its predictor."""
    weights_path = os.path.join(model_dir, "model.npz")
    classifier = L.Classifier(MLP(1000, 10))
    serializers.load_npz(weights_path, classifier)
    return classifier.predictor
Example #12
Source File: fasterRCNN.py From deel with MIT License | 5 votes |
def get_model(gpu):
    """Build a FasterRCNN in inference mode with pre-trained VGG16 weights."""
    net = FasterRCNN(gpu)
    net.train = False  # inference only
    serializers.load_npz('misc/VGG16_faster_rcnn_final.model', net)
    return net
Example #13
Source File: NNet.py From alpha-zero-general with MIT License | 5 votes |
def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
    """Restore network parameters from a saved checkpoint.

    Args:
        folder: directory containing the checkpoint.
        filename: checkpoint file name.

    Raises:
        IOError: if the checkpoint file does not exist.
    """
    filepath = os.path.join(folder, filename)
    if not os.path.exists(filepath):
        # BUG FIX: the original did `raise ("No model in path ...")`,
        # which raises a string and therefore fails with
        # "TypeError: exceptions must derive from BaseException".
        # Raise a real exception type with the intended message instead.
        raise IOError("No model in path {}".format(filepath))
    serializers.load_npz(filepath, self.nnet)
Example #14
Source File: mnist.py From sagemaker-chainer-container with Apache License 2.0 | 5 votes |
def model_fn(model_dir):
    """SageMaker model-loading hook: restore the trained classifier."""
    net = L.Classifier(MLP(1000, 10))
    serializers.load_npz(os.path.join(model_dir, "model.npz"), net)
    return net.predictor
Example #15
Source File: single_machine_customer_script.py From sagemaker-chainer-container with Apache License 2.0 | 5 votes |
def model_fn(model_dir):
    """Load the trained MLP classifier from `model_dir` and return its predictor."""
    snapshot = os.path.join(model_dir, 'model.npz')
    classifier = L.Classifier(MLP(1000, 10))
    serializers.load_npz(snapshot, classifier)
    return classifier.predictor
Example #16
Source File: distributed_customer_script_with_env_vars.py From sagemaker-chainer-container with Apache License 2.0 | 5 votes |
def model_fn(model_dir):
    """Deserialize the saved classifier for inference (SageMaker entry point)."""
    clf = L.Classifier(MLP(1000, 10))
    serializers.load_npz(os.path.join(model_dir, 'model.npz'), clf)
    return clf.predictor
Example #17
Source File: single_machine_custom_loop.py From sagemaker-chainer-container with Apache License 2.0 | 5 votes |
def model_fn(model_dir):
    """Restore the trained network and expose only the predictor part."""
    path = os.path.join(model_dir, 'model.npz')
    wrapped = L.Classifier(MLP(1000, 10))
    serializers.load_npz(path, wrapped)
    return wrapped.predictor
Example #18
Source File: distributed_customer_script.py From sagemaker-chainer-container with Apache License 2.0 | 5 votes |
def model_fn(model_dir):
    """SageMaker hook: load serialized weights into a fresh classifier."""
    classifier = L.Classifier(MLP(1000, 10))
    model_file = os.path.join(model_dir, 'model.npz')
    serializers.load_npz(model_file, classifier)
    return classifier.predictor
Example #19
Source File: resource.py From machine_learning_in_application with MIT License | 5 votes |
def load_model(self, model):
    """Load the most recent serialized weights for ``model``'s class.

    Candidate files in ``self.model_path`` are matched by the lower-cased
    class name and the ``.model`` suffix; the lexicographically last one
    (most recent by naming convention) is loaded.

    Raises:
        Exception: if the model directory or a matching model file is missing.
    """
    if not os.path.exists(self.model_path):
        raise Exception("model file directory does not exist.")
    suffix = ".model"
    keyword = model.__class__.__name__.lower()
    candidates = [f for f in os.listdir(self.model_path)
                  if keyword in f and f.endswith(suffix)]
    candidates.sort()
    # ROBUSTNESS FIX: the original did `candidates[-1]` unconditionally and
    # crashed with IndexError when no matching file existed; fail with an
    # explicit message instead.
    if not candidates:
        raise Exception("no model file for {} found in {}.".format(
            model.__class__.__name__, self.model_path))
    latest = candidates[-1]
    # print("targets {}, pick up {}.".format(candidates, latest))
    model_file = os.path.join(self.model_path, latest)
    serializers.load_npz(model_file, model)
Example #20
Source File: generate.py From ConvLSTM with MIT License | 5 votes |
def generate():
    """CLI entry point: run the ConvLSTM network on one MovingMNIST sample.

    Optionally loads pre-trained weights (--model) and runs on a GPU (--gpu).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--model', '-m', type=str, default=None)
    parser.add_argument('--id', '-i', type=int, default=0)
    parser.add_argument('--inf', type=int, default=10)
    parser.add_argument('--outf', type=int, default=10)
    args = parser.parse_args()

    test = dataset.MovingMnistDataset(0, 10000, args.inf, args.outf)
    model = network.MovingMnistNetwork(sz=[128, 64, 64], n=2, directory="img/")

    if args.model is not None:  # FIX: identity comparison, was `!= None`
        print("loading model from " + args.model)
        serializers.load_npz(args.model, model)

    x, t = test[args.id]
    # Add a leading batch axis of size 1.
    x = np.expand_dims(x, 0)
    t = np.expand_dims(t, 0)

    if args.gpu >= 0:
        # BUG FIX: select the GPU requested on the command line; the
        # original always used device 0 regardless of --gpu.
        cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
        x = cuda.cupy.array(x)
        t = cuda.cupy.array(t)

    res = model(Variable(x), Variable(t))
Example #21
Source File: train.py From deeppose with GNU General Public License v2.0 | 5 votes |
def get_optimizer(model, opt, lr, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None,
                  resume_opt=None):
    """Create an optimizer by name, attach it to ``model`` and optionally resume.

    MomentumSGD additionally gets a weight-decay hook; an unknown ``opt``
    name raises.
    """
    factories = {
        'MomentumSGD': lambda: optimizers.MomentumSGD(lr=lr, momentum=0.9),
        'Adam': lambda: optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps),
        'AdaGrad': lambda: optimizers.AdaGrad(lr=lr),
        'RMSprop': lambda: optimizers.RMSprop(lr=lr),
    }
    if opt not in factories:
        raise Exception('No optimizer is selected')
    optimizer = factories[opt]()

    # The first model as the master model
    optimizer.setup(model)

    if opt == 'MomentumSGD':
        optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))

    # Restore optimizer state when resuming a run.
    if resume_opt is not None:
        serializers.load_npz(resume_opt, optimizer)
    return optimizer
Example #22
Source File: agent.py From chainerrl with MIT License | 5 votes |
def load_npz_no_strict(filename, obj):
    """Load npz parameters into ``obj``; on a key mismatch, warn and retry
    with a non-strict deserializer that skips missing entries."""
    try:
        serializers.load_npz(filename, obj)
    except KeyError as e:
        warnings.warn(repr(e))
        with numpy.load(filename) as f:
            deserializer = serializers.NpzDeserializer(f, strict=False)
            deserializer.load(obj)
Example #23
Source File: evaluate_flic.py From deeppose with GNU General Public License v2.0 | 5 votes |
def load_model(args):
    """Instantiate the model class named after its source file and load weights."""
    basename = os.path.basename(args.model)
    class_name = basename.split('.')[0]
    module = imp.load_source(class_name, args.model)
    model_cls = getattr(module, class_name)
    model = model_cls(args.joint_num)
    serializers.load_npz(args.param, model)
    model.train = False  # evaluation mode
    return model
Example #24
Source File: infer.py From tgan with MIT License | 5 votes |
def load_model(result_dir, config, model_type, snapshot_path=None):
    """Build the model described by ``config['models'][model_type]``.

    The model source file is loaded from ``result_dir`` and, when
    ``snapshot_path`` is given, trained weights are restored.
    """
    spec = config['models'][model_type]
    model_fn = '{}/{}'.format(result_dir, os.path.basename(spec['fn']))
    model_name = spec['name']
    module = imp.load_source(model_name, model_fn)
    model = getattr(module, model_name)(**spec['args'])
    if snapshot_path:
        serializers.load_npz(snapshot_path, model)
    return model
Example #25
Source File: model_reader.py From context2vec with Apache License 2.0 | 5 votes |
def read_lstm_model(self, params, train):
    """Build a BiLstmContext model for evaluation and load its weights.

    Returns:
        (w, word2index, index2word, model) where ``w`` holds the
        L2-normalized target word embeddings.
    """
    assert train == False  # reading a model to continue training is currently not supported
    words_file = params['config_path'] + params['words_file']
    model_file = params['config_path'] + params['model_file']
    unit = int(params['unit'])
    deep = (params['deep'] == 'yes')
    drop_ratio = float(params['drop_ratio'])

    # Read and L2-normalize the target word embeddings (zero rows kept as-is).
    w, word2index, index2word = self.read_words(words_file)
    norms = numpy.sqrt((w * w).sum(1))
    norms[norms == 0.] = 1.
    w /= norms.reshape((norms.shape[0], 1))

    context_word_units = unit
    lstm_hidden_units = IN_TO_OUT_UNITS_RATIO * unit
    target_word_units = IN_TO_OUT_UNITS_RATIO * unit

    # Dummy word counts and loss function -- not used for evaluation.
    cs = [1 for _ in range(len(word2index))]
    loss_func = L.NegativeSampling(
        target_word_units, cs, NEGATIVE_SAMPLING_NUM)

    model = BiLstmContext(deep, self.gpu, word2index, context_word_units,
                          lstm_hidden_units, target_word_units, loss_func,
                          train, drop_ratio)
    S.load_npz(model_file, model)
    return w, word2index, index2word, model
Example #26
Source File: test_spectral_normalization.py From chainer with MIT License | 5 votes |
def check_serialization(self, backend_config):
    """Saving then reloading a spectrally-normalized layer must preserve
    the weight, the power-iteration vector, and the layer's output."""
    with utils.tempdir() as root:
        filename = os.path.join(root, 'tmp.npz')

        layer1 = self.layer.copy('copy')
        hook1 = copy.deepcopy(self.hook)
        layer1.add_hook(hook1)
        layer1.to_device(backend_config.device)
        x = backend_config.get_array(self.x)
        with backend_config:
            layer1(x)
            with chainer.using_config('train', False):
                y1 = layer1(x)
        serializers.save_npz(filename, layer1)

        layer2 = self.layer.copy('copy')
        hook2 = copy.deepcopy(self.hook)
        layer2.add_hook(hook2)

        # Test loading is nice: capture any exception and require none.
        load_error = None
        try:
            serializers.load_npz(filename, layer2)
        except Exception as err:
            load_error = err
        assert load_error is None

        with chainer.using_config('train', False):
            y2 = layer2(self.x.copy())

        # Weight and power-iteration vector must round-trip exactly.
        orig_weight = _cpu._to_cpu(
            getattr(layer1, hook1.weight_name).array)
        orig_vector = _cpu._to_cpu(getattr(layer1, hook1.vector_name))
        numpy.testing.assert_array_equal(
            orig_weight, getattr(layer2, hook2.weight_name).array)
        numpy.testing.assert_array_equal(
            orig_vector, getattr(layer2, hook2.vector_name))
        testing.assert_allclose(y1.array, y2.array)
Example #27
Source File: eval.py From knmt with GNU General Public License v3.0 | 5 votes |
def create_and_load_encdec_from_files(config_training_fn, trained_model):
    """Rebuild an encoder-decoder from its training config and load weights.

    Returns:
        (encdec, eos_idx, src_indexer, tgt_indexer)
    """
    log.info("loading model config from %s" % config_training_fn)
    config_training = train_config.load_config_train(config_training_fn)
    parts = train.create_encdec_and_indexers_from_config_dict(config_training)
    encdec, eos_idx, src_indexer, tgt_indexer = parts

    log.info("loading model from %s" % trained_model)
    serializers.load_npz(trained_model, encdec)

    return encdec, eos_idx, src_indexer, tgt_indexer
Example #28
Source File: test_once_trigger.py From chainer with MIT License | 5 votes |
def test_resumed_trigger_sparse_call(self):
    """OnceTrigger must survive save/load when queried only intermittently."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
    accumulated = False
    accumulated_finished = True

    def drive(trig, lo, hi):
        # Check the trigger on a random subset of iterations; fold the
        # skipped expectations with OR (fired) and AND (finished).
        nonlocal accumulated, accumulated_finished
        for want, done in zip(self.resumed_expected[lo:hi],
                              self.resumed_finished[lo:hi]):
            trainer.updater.update()
            accumulated = accumulated or want
            accumulated_finished = accumulated_finished and done
            if random.randrange(2):
                self.assertEqual(trig.finished, accumulated_finished)
                self.assertEqual(trig(trainer), accumulated)
                accumulated = False
                accumulated_finished = True

    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        drive(trigger, None, self.resume)
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        serializers.load_npz(f.name, trigger)
        drive(trigger, self.resume, None)
Example #29
Source File: test_minmax_trigger.py From chainer with MIT License | 5 votes |
def _test_trigger(self, trigger, key, accuracies, expected,
                  resume=None, save=None):
    """Feed ``accuracies`` as observations under ``key`` and assert that
    ``trigger`` fires exactly at the iterations listed in ``expected``.

    Optionally resumes from / saves to a trainer snapshot.
    """
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=(len(accuracies), 'iteration'),
        iter_per_epoch=self.iter_per_epoch)
    updater = trainer.updater

    def _serialize_updater(serializer):
        # The mock updater has no serialize(); provide one so that
        # iteration/epoch state round-trips through snapshots.
        updater.iteration = serializer('iteration', updater.iteration)
        updater.epoch = serializer('epoch', updater.epoch)
        updater.is_new_epoch = serializer(
            'is_new_epoch', updater.is_new_epoch)
    trainer.updater.serialize = _serialize_updater

    def set_observation(t):
        # Publish the accuracy for the just-finished iteration.
        t.observation = {key: accuracies[t.updater.iteration - 1]}
    trainer.extend(set_observation, name='set_observation',
                   trigger=(1, 'iteration'), priority=2)

    invoked_iterations = []

    def record(t):
        invoked_iterations.append(t.updater.iteration)
    trainer.extend(record, name='record', trigger=trigger, priority=1)

    if resume is not None:
        serializers.load_npz(resume, trainer)

    trainer.run()
    self.assertEqual(invoked_iterations, expected)

    if save is not None:
        serializers.save_npz(save, trainer)
Example #30
Source File: test_interval_trigger.py From chainer with MIT License | 5 votes |
def test_resumed_trigger(self):
    """IntervalTrigger state must survive an npz save/load round trip."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)

    def drive(trig, chunk):
        for want in chunk:
            trainer.updater.update()
            self.assertEqual(trig(trainer), want)

    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.IntervalTrigger(*self.interval)
        drive(trigger, self.expected[:self.resume])
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.IntervalTrigger(*self.interval)
        serializers.load_npz(f.name, trigger)
        drive(trigger, self.expected[self.resume:])