Python model.create_model() Examples
The following are 10 code examples of model.create_model(), each taken from an open-source project; the project, source file, and license are noted above each example. You may also want to check out all available functions/classes of the module model, or try the search function.
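Most of the examples below follow the same basic pattern: build the network with create_model(), restore pretrained weights, and switch to inference mode. A minimal sketch of that pattern is shown here for orientation; the import path, the opt argument, and the checkpoint layout are assumptions, and in several of the projects below create_model() has a different signature (for example, returning extra values alongside the model).

import torch

from model import create_model  # assumed module layout; adjust to your project


def load_for_inference(opt, checkpoint_path):
    # Build the network from a project-specific option/config object.
    model = create_model(opt)

    # Pick a device and move the model onto it.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    # Restore weights; some projects wrap the state dict in a larger
    # checkpoint dictionary, in which case you would unpack it first.
    state_dict = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(state_dict)

    # Disable training-time behaviour such as dropout and batch-norm updates.
    model.eval()
    return model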
Example #1
Source File: demo.py From pytorch-pose-hg-3d with GNU General Public License v3.0
def main(opt):
    opt.heads['depth'] = opt.num_output
    if opt.load_model == '':
        opt.load_model = '../models/fusion_3d_var.pth'
    if opt.gpus[0] >= 0:
        opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))
    else:
        opt.device = torch.device('cpu')

    model, _, _ = create_model(opt)
    model = model.to(opt.device)
    model.eval()

    if os.path.isdir(opt.demo):
        ls = os.listdir(opt.demo)
        for file_name in sorted(ls):
            if is_image(file_name):
                image_name = os.path.join(opt.demo, file_name)
                print('Running {} ...'.format(image_name))
                image = cv2.imread(image_name)
                demo_image(image, model, opt)
    elif is_image(opt.demo):
        print('Running {} ...'.format(opt.demo))
        image = cv2.imread(opt.demo)
        demo_image(image, model, opt)
Example #2
Source File: task.py From cloudml-edge-automation with Apache License 2.0
def main(_):
    model, argv = model_lib.create_model()
    run(model, argv)
Example #3
Source File: task.py From cloudml-samples with Apache License 2.0
def main(_):
    model, argv = model_lib.create_model()
    run(model, argv)
Example #4
Source File: solvers.py From ganimation_replicate with MIT License
def init_train_setting(self):
    self.train_dataset = create_dataloader(self.opt)
    self.train_model = create_model(self.opt)
    self.train_total_steps = 0
    self.epoch_len = self.opt.niter + self.opt.niter_decay
    self.cur_lr = self.opt.lr
Example #5
Source File: solvers.py From ganimation_replicate with MIT License
def init_test_setting(self, opt):
    self.test_dataset = create_dataloader(opt)
    self.test_model = create_model(opt)
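Both methods in Examples #4 and #5 are meant to live on a solver object that owns the options. The skeleton below is a hypothetical sketch of how such a class might wire them together; the Solver name, the run_train loop, and the import paths are assumptions, not code from the ganimation_replicate repository.

from data import create_dataloader   # assumed import paths
from model import create_model


class Solver(object):
    # Hypothetical wrapper; the original project defines a richer solver,
    # this skeleton only shows where the two initializers fit.
    def __init__(self, opt):
        self.opt = opt

    def init_train_setting(self):
        self.train_dataset = create_dataloader(self.opt)
        self.train_model = create_model(self.opt)
        self.train_total_steps = 0
        self.epoch_len = self.opt.niter + self.opt.niter_decay
        self.cur_lr = self.opt.lr

    def run_train(self):
        self.init_train_setting()
        for epoch in range(1, self.epoch_len + 1):
            for batch in self.train_dataset:
                self.train_total_steps += 1
                # the per-batch optimization step on self.train_model goes here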
Example #6
Source File: predict.py From funcom with GNU General Public License v3.0
def load_model_from_weights(modelpath, modeltype, datvocabsize, comvocabsize,
                            smlvocabsize, datlen, comlen, smllen):
    config = dict()
    config['datvocabsize'] = datvocabsize
    config['comvocabsize'] = comvocabsize
    config['datlen'] = datlen  # length of the data
    config['comlen'] = comlen  # comlen sent us in workunits
    config['smlvocabsize'] = smlvocabsize
    config['smllen'] = smllen

    model = create_model(modeltype, config)
    model.load_weights(modelpath)
    return model
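For reference, a hedged usage sketch for the helper above. The checkpoint path, model type string, and vocabulary/sequence sizes are placeholder values, not values taken from the funcom project; substitute the settings your model was trained with.

# Hypothetical call; adjust sizes and paths to match your trained model.
model = load_model_from_weights(
    modelpath='outdir/models/model_E05.h5',   # placeholder checkpoint path
    modeltype='attendgru',                    # placeholder model type string
    datvocabsize=10000,
    comvocabsize=10000,
    smlvocabsize=10000,
    datlen=100,
    comlen=13,
    smllen=100)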
Example #7
Source File: test.py From Image-Caption-Generator with MIT License
def predict(image_name,
            data_dir="/home/shagun/projects/Image-Caption-Generator/data/",
            weights_path=None,
            mode="test"):
    '''Method to predict the caption for a given image.
    weights_path is the path to the .h5 file (model)'''
    image_path = data_dir + "images/" + image_name
    vgg_model = load_vgg16()
    vgg_embedding = vgg_model.predict(
        load_image(image_path))
    image_embeddings = [vgg_embedding]
    config_dict = generate_config(data_dir=data_dir, mode=mode)
    print(config_dict)
    model = create_model(config_dict=config_dict, compile_model=False)
    model.load_weights(data_dir + "model/" + weights_path)
    tokenizer = get_tokenizer(config_dict=config_dict, data_dir=data_dir)
    index_to_word = {v: k for k, v in tokenizer.word_index.items()}
    for image_embedding in image_embeddings:
        gen_captions(config=config_dict,
                     model=model,
                     image_embedding=image_embedding,
                     tokenizer=tokenizer,
                     num_captions=2,
                     index_to_word=index_to_word)
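A hypothetical invocation of predict(); the image and weights file names below are made up, and the data_dir default baked into the function is specific to the original author's machine, so you would normally override it.

# Hypothetical call; point data_dir at your own data directory and
# weights_path at a checkpoint saved under data_dir + "model/".
predict('example.jpg',
        data_dir='./data/',
        weights_path='weights-30.hdf5',
        mode='test')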
Example #8
Source File: task.py From cloudml-samples with Apache License 2.0
def train_and_evaluate(args):
    model = model_lib.create_model(args)

    env = json.loads(os.environ.get('TF_CONFIG', '{}'))

    # Print the job data as provided by the service.
    logging.info('Original job data: %s', env.get('job', {}))

    # First find out if there's a task value on the environment variable.
    # If there is none or it is empty define a default one.
    task_data = env.get('task', None) or {'type': 'master', 'index': 0}
    task = type('TaskSpec', (object,), task_data)
    trial = task_data.get('trial')
    if trial is not None:
        args.output_path = os.path.join(args.output_path, trial)
    if args.write_to_tmp and args.output_path.startswith('gs://'):
        output_path = args.output_path
        args.output_path = os.path.join('/tmp/', str(uuid.uuid4()))
        os.makedirs(args.output_path)
    else:
        output_path = None

    if args.copy_train_data_to_tmp:
        args.train_data_paths = copy_data_to_tmp(args.train_data_paths)
    if args.copy_eval_data_to_tmp:
        args.eval_data_paths = copy_data_to_tmp(args.eval_data_paths)

    if not args.eval_batch_size:
        # If eval_batch_size not set, use min of batch_size and eval_set_size
        args.eval_batch_size = min(args.batch_size, args.eval_set_size)
        logging.info("setting eval batch size to %s", args.eval_batch_size)

    cluster_data = env.get('cluster', None)
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

    if args.write_predictions:
        write_predictions(args, model, cluster, task)
    else:
        dispatch(args, model, cluster, task)

    if output_path and (not cluster or not task or task.type == 'master'):
        subprocess.check_call([
            'gsutil', '-m', '-q', 'cp', '-r', args.output_path + '/*',
            output_path
        ])
        shutil.rmtree(args.output_path, ignore_errors=True)
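train_and_evaluate() reads its cluster and task description from the TF_CONFIG environment variable, which the training service sets for each replica. The snippet below is a hedged illustration of the kind of JSON the code above expects for a single "master" task; the addresses are placeholders, and in a real deployment the variable is populated by the service rather than by hand.

import json
import os

# Illustrative TF_CONFIG for a single master task with no extra workers.
os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {'master': ['localhost:2222']},
    # A hyperparameter-tuning run would also carry a 'trial' id here,
    # which the code above appends to args.output_path.
    'task': {'type': 'master', 'index': 0},
    # Populated by the service with details of the submitted job.
    'job': {}
})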
Example #9
Source File: predict.py From recipe-summarization with MIT License
def main(sample_str=None):
    """Predict a title for a recipe."""
    # load model parameters used for training
    with open(path.join(path_models, 'model_params.json'), 'r') as f:
        model_params = json.load(f)

    # create placeholder model
    model = create_model(**model_params)

    # load weights from training run
    load_weights(model, path.join(path_models, '{}.hdf5'.format(FN1)))

    # load recipe titles and descriptions
    with open(path.join(path_data, 'vocabulary-embedding.data.pkl'), 'rb') as fp:
        X_data, Y_data = pickle.load(fp)

    # load vocabulary
    with open(path.join(path_data, '{}.pkl'.format(FN0)), 'rb') as fp:
        embedding, idx2word, word2idx, glove_idx2idx = pickle.load(fp)
    vocab_size, embedding_size = embedding.shape
    oov0 = vocab_size - nb_unknown_words

    if sample_str is None:
        # load random recipe description if none provided
        i = np.random.randint(len(X_data))
        sample_str = ''
        sample_title = ''
        for w in X_data[i]:
            sample_str += idx2word[w] + ' '
        for w in Y_data[i]:
            sample_title += idx2word[w] + ' '
        y = Y_data[i]
        print('Randomly sampled recipe:')
        print(sample_title)
        print(sample_str)
    else:
        sample_title = ''
        y = [eos]

    x = [word2idx[w.rstrip('^')] for w in sample_str.split()]

    samples = gensamples(
        skips=2,
        k=1,
        batch_size=2,
        short=False,
        temperature=1.,
        use_unk=True,
        model=model,
        data=(x, y),
        idx2word=idx2word,
        oov0=oov0,
        glove_idx2idx=glove_idx2idx,
        vocab_size=vocab_size,
        nb_unknown_words=nb_unknown_words,
    )

    # join the predicted token indices back into a title and print it
    headline = samples[0][0][len(samples[0][1]):]
    print(' '.join(idx2word[w] for w in headline))
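A minimal way to exercise the function above, assuming the module-level paths and constants (path_models, path_data, FN0, FN1, nb_unknown_words, eos) are configured as in the original project: calling it with no argument samples a random recipe from the pickled data, while passing a string predicts a title for it. The example description is illustrative only.

# Sample a random recipe from the stored data and print its predicted title.
main()

# Or predict a title for a specific, already tokenized description.
# Every token must be present in word2idx or the lookup will raise KeyError.
main('preheat oven to 350 degrees and butter a baking dish')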
Example #10
Source File: train.py From Image-Caption-Generator with MIT License
def train(batch_size=128,
          epochs=100,
          data_dir="/home/shagun/projects/Image-Caption-Generator/data/",
          weights_path=None,
          mode="train"):
    '''Method to train the image caption generator
    weights_path is the path to the .h5 file where weights from the
    previous run are saved (if available)'''
    config_dict = generate_config(data_dir=data_dir, mode=mode)
    config_dict['batch_size'] = batch_size
    steps_per_epoch = config_dict["total_number_of_examples"] // batch_size
    print("steps_per_epoch = ", steps_per_epoch)
    train_data_generator = debug_generator(config_dict=config_dict,
                                           data_dir=data_dir)
    model = create_model(config_dict=config_dict)

    if weights_path:
        model.load_weights(weights_path)

    file_name = data_dir + "model/weights-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath=file_name,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    tensorboard = TensorBoard(log_dir='../logs',
                              histogram_freq=0,
                              batch_size=batch_size,
                              write_graph=True,
                              write_grads=True,
                              write_images=False,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    callbacks_list = [checkpoint, tensorboard]
    model.fit_generator(
        generator=train_data_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=2,
        callbacks=callbacks_list)
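Finally, a hypothetical call to start or resume training with the function above; the data directory and checkpoint file name are placeholders, and checkpoints are written to data_dir + "model/" as the code shows.

# Fresh run with placeholder settings.
train(batch_size=64, epochs=20, data_dir='./data/', mode='train')

# Resuming from a previously saved checkpoint (placeholder file name).
train(batch_size=64, epochs=20, data_dir='./data/',
      weights_path='./data/model/weights-10.hdf5', mode='train')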