Python Model.Model() Examples

The following are 15 code examples of Model.Model(), drawn from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions/classes of the module Model, or try the search function.
Example #1
Source File: train.py    From Tom-Chang-Deep-Lyrics with MIT License
def main(_):
    train_data = context_of_idx

    with tf.Graph().as_default(), tf.Session(config=config_tf) as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = Model.Model(is_training=True, config=config)

        tf.global_variables_initializer().run()

        model_saver = tf.train.Saver(tf.global_variables())

        for i in range(config.iteration):
            print("Training Epoch: %d ..." % (i + 1))
            train_perplexity = run_epoch(session, m, train_data, m.train_op)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))

            # Checkpoint the model every `save_freq` epochs.
            if (i + 1) % config.save_freq == 0:
                print("model saving ...")
                model_saver.save(session, config.model_path + '-%d' % (i + 1))
                print("Done!")
Example #2
Source File: LearningMachine.py    From NeuronBlocks with MIT License
def load_model(self, model_path):
        if self.use_gpu is True:
            self.model = torch.load(model_path)
            # Unwrap a DataParallel checkpoint to reach the underlying module.
            if isinstance(self.model, nn.DataParallel):
                self.model = self.model.module
            self.model.update_use_gpu(self.use_gpu)
            self.model.cuda()
            # Re-wrap for (multi-)GPU execution.
            self.model = nn.DataParallel(self.model)
        else:
            # map_location='cpu' loads GPU-saved checkpoints onto the CPU.
            self.model = torch.load(model_path, map_location='cpu')
            if isinstance(self.model, nn.DataParallel):
                self.model = self.model.module
            self.model.update_use_gpu(self.use_gpu)

        logging.info("Model %s loaded!" % model_path)
        logging.info("Total trainable parameters: %d" % (get_trainable_param_num(self.model))) 
Example #3
Source File: Model_test.py    From pymtl with BSD 3-Clause "New" or "Revised" License
def test_ModelArgsHashWithDefault():

  M = ModelArgsHashWithDefault

  cmp_class_name_eq  ( M(      3,      4 ), M(      3,      4 ) )
  cmp_class_name_neq ( M(      3,      4 ), M(      5,      6 ) )

  cmp_class_name_eq  ( M(      3, arg2=4 ), M(      3, arg2=4 ) )
  cmp_class_name_neq ( M(      3, arg2=4 ), M(      5, arg2=6 ) )

  cmp_class_name_eq  ( M( arg1=3, arg2=4 ), M( arg1=3, arg2=4 ) )
  cmp_class_name_neq ( M( arg1=3, arg2=4 ), M( arg1=5, arg2=6 ) )

  cmp_class_name_eq  ( M( arg2=4, arg1=3 ), M( arg1=3, arg2=4 ) )
  cmp_class_name_neq ( M( arg2=4, arg1=3 ), M( arg1=5, arg2=6 ) )

  cmp_class_name_eq  ( M(      3 ), M(      3 ) )
  cmp_class_name_neq ( M(      3 ), M(      5 ) )

  cmp_class_name_eq  ( M( arg1=3 ), M( arg1=3 ) )
  cmp_class_name_neq ( M( arg1=3 ), M( arg1=5 ) )

#-----------------------------------------------------------------------
# ClassNameCollision
#-----------------------------------------------------------------------
# A model's class_name is generated during elaboration from a hash of its
# argument names and values. If two distinct model classes share the same
# class name, the same args, and the same arg values (e.g., two Muxes,
# each with 2 ports and 47 bits, where one is one-hot and the other is
# not), their hashes will collide. During Verilog translation, collided
# names make both modules point at the same module definition, so one of
# them is wrong.
#
# This collision is prevented by mixing the model's __module__ into the
# hash generation (_gen_class_name), since a class's __module__ differs
# when the classes come from different modules.
#
# This test case creates two models with the class name
# ClassNameCollisionModel, one in this module and one in
# Model_dummy_test.py. They have the same name and the same args. The
# test checks that their class_names do not collide after elaboration.
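
# A minimal sketch of the scheme described above (a hypothetical
# stand-in, not pymtl's actual _gen_class_name): mixing the class's
# __module__ into the digest gives same-name, same-args classes from
# different modules distinct names, while keeping each name stable
# across runs.

import hashlib

def _gen_class_name_sketch( cls, arg_dict ):
  payload = '{}|{}'.format( cls.__module__, sorted( arg_dict.items() ) )
  digest  = hashlib.md5( payload.encode('utf-8') ).hexdigest()[:8]
  return '{}_{}'.format( cls.__name__, digest )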
Example #4
Source File: trainIK.py    From RotationContinuity with MIT License
def train(dances_lst, param):
    torch.cuda.set_device(param.device)

    print("####Initiate model AE")

    model = Model.Model(joint_num=57, out_rotation_mode=param.out_rotation_mode)
    if param.read_weight_path != "":
        print("Load " + param.read_weight_path)
        model.load_state_dict(torch.load(param.read_weight_path))
    model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=param.lr)  # , betas=(0.5, 0.9))
    model.train()

    model.initialize_skeleton_features("../data/standard.bvh")

    print("start train")

    for iteration in range(param.start_iteration, param.total_iteration):

        # Periodically dump a BVH file for visual inspection.
        save_bvh = (iteration % param.save_bvh_iteration == 0)
        train_one_iteraton(param.logger, dances_lst, param, model, optimizer,
                           iteration, save_bvh)

        # Save a numbered weight snapshot ...
        if iteration % param.save_weight_iteration == 0:
            path = param.write_weight_folder + "model_%07d" % iteration
            #print ("save_weight:  " + path)
            torch.save(model.state_dict(), path + ".weight")

        # ... and periodically refresh the unnumbered "latest" checkpoint.
        if iteration % 10000 == 0:
            path = param.write_weight_folder + "model"
            torch.save(model.state_dict(), path + ".weight")
Example #5
Source File: Train.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._epochs = params['EPOCHS']
        self._batch_size = params['BATCH_SIZE']
        self._lr = params['LEARNING_RATE']
        self._n_class = params['N_CLASS']
        self._divide_lr = params['DIVIDE_LEARNING_RATE_AT']

        self.data = Data(params)
        self.model = Model(params)

        self._save_path = os.path.abspath('./Model') 
Example #6
Source File: Infer.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._batch_size = params['BATCH_SIZE']

        self.data = Data(params)
        self.model = Model(params) 
Example #7
Source File: Infer.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._batch_size = params['BATCH_SIZE']
        self._top_k = params['PLOT_TOP_K']
        self._save_path = os.path.abspath(params['INFER_PATH'] + 'Plot')

        self.data = Data(params)
        self.model = Model(params) 
Example #8
Source File: Train.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._epochs = params['EPOCHS']
        self._batch_size = params['BATCH_SIZE']
        self._lr = params['LEARNING_RATE']
        self._n_class = params['N_CLASS']
        self._divide_lr = params['DIVIDE_LEARNING_RATE_AT']

        self.data = Data(params)
        self.model = Model(params)

        self._save_path = os.path.abspath('./Model') 
Example #9
Source File: Train.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._epochs = params['EPOCHS']
        self._batch_size = params['BATCH_SIZE']
        self._lr = params['LEARNING_RATE']
        self._n_class = params['N_CLASS']

        self.data = Data(params)
        self.model = Model(params)

        self._save_path = os.path.abspath('./Model') 
Example #10
Source File: Train.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._epochs = params['EPOCHS']
        self._batch_size = params['BATCH_SIZE']
        self._lr = params['LEARNING_RATE']
        self._divide_lr = params['DIVIDE_LEARNING_RATE_AT']

        self.data = Data(params)
        n_class = len(params['REQD_LABELS'])
        self.model = Model(params, n_class=n_class)
        self._save_path = os.path.abspath('./Model') 
Example #11
Source File: Train.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._epochs = params['EPOCHS']
        self._batch_size = params['BATCH_SIZE']
        self._lr = params['LEARNING_RATE']
        self._n_class = params['N_CLASS']
        self._divide_lr = params['DIVIDE_LEARNING_RATE_AT']

        self.data = Data(params)
        self.model = Model(params)

        self.save_path = os.path.abspath('./Model') 
Example #12
Source File: Augment.py    From TFHubSample with GNU General Public License v3.0
def __init__(self, params):
        self._setup_files(params['SRC_PATH'], params['DST_PATH'])
        self.augmentations_per_image = params['AUGMENTATIONS_PER_IMAGE']

        self.model = Model(params) 
Example #13
Source File: GreenMachine.py    From GreenMachine with MIT License
def createModel(config_path, checkpoint_path, graph_path):
    """ Create a TensorRT Model.
    config_path (string) - The path to the model config file.
    checkpoint_path (string) - The path to the model checkpoint file(s).
    graph_path (string) - The path to the model graph.
    returns (Model) - The TRT model built or loaded from the input files.
    """

    global build_graph, prev_classes

    trt_graph = None
    input_names = None
    
    if build_graph:
        frozen_graph, input_names, output_names = build_detection_graph(
            config=config_path,
            checkpoint=checkpoint_path
        )
    
        trt_graph = trt.create_inference_graph(
            input_graph_def=frozen_graph,
            outputs=output_names,
            max_batch_size=1,
            max_workspace_size_bytes=1 << 25,
            precision_mode='FP16',
            minimum_segment_size=50
        )

        with open(graph_path, 'wb') as f:
            f.write(trt_graph.SerializeToString())

        with open('config.txt', 'r+') as json_file:  
            data = json.load(json_file)
            data['model'] = []
            data['model'] = [{'input_names': input_names}]
            json_file.seek(0)
            json_file.truncate()
            json.dump(data, json_file)

    else:
        with open(graph_path, 'rb') as f:
            trt_graph = tf.GraphDef()
            trt_graph.ParseFromString(f.read())
        with open('config.txt') as json_file:  
            data = json.load(json_file)
            input_names = data['model'][0]['input_names']

    return Model(trt_graph, input_names) 
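
# Hypothetical usage sketch (the file names below are placeholders, not
# paths from the original project):
#
#   model = createModel('pipeline.config', 'model.ckpt', 'trt_graph.pb')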
Example #14
Source File: Model_test.py    From pymtl with BSD 3-Clause "New" or "Revised" License
def test_ClassNameCollision():
  model1 = ClassNameCollisionModel     ( 1, 2 ) # same arg values
  model2 = ClassNameCollisionModelDummy( 1, 2 ) # same arg values
  model1.elaborate()
  model2.elaborate()
  assert model1.class_name != model2.class_name

#-----------------------------------------------------------------------
# ClassNameCollisionSameModule
#-----------------------------------------------------------------------
# The ClassNameCollision test case checks for class name collisions caused
# by same-name, same-args classes in _different_ modules. Collisions can
# still happen if the same-name, same-args classes live in the _same_
# module. This test case checks for that kind of collision using two
# classes named "ClassNameCollisionSameModule" placed at different levels
# of the class hierarchy but instantiated with the same name and the same
# args.
#
# TODO: This corner case is not yet fixed and may never need to be. If it
# ever seems likely to happen in practice, we will need this test case to
# pass. The test would pass if we used __class__ in the class name
# generation (_gen_class_name): while that always avoids collisions, it
# also produces a differently named translated Verilog file on every run,
# and a constantly changing filename makes it hard for other tools to
# point at the generated Verilog. Using __module__ in the hash generation
# still avoids class name collisions across modules while keeping the
# translated Verilog filename stable. The trade-off is that we do not
# avoid same-class-name, same-args collisions within a single module, but
# that case seems rare.

# class ClassNameCollisionSameModule( Model ):
#   def __init__( s, arg1, arg2 ):
#     s.arg1 = arg1
#     s.arg2 = arg2
#
#   class ClassNameCollisionSameModule( Model ):
#     def __init__( s, arg1, arg2 ):
#       s.arg1 = arg1
#       s.arg2 = arg2
#
# def test_ClassNameCollisionSameModule():
#   model1 = ClassNameCollisionSameModule( 1, 2 )
#   model2 = ClassNameCollisionSameModule.ClassNameCollisionSameModule( 1, 2 )
#   model1.elaborate()
#   model2.elaborate()
#   assert model1.class_name != model2.class_name 
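
# A small illustration (hypothetical, Python 3 only) of why a
# __module__-based hash cannot separate the two nested classes above:
# both classes report the same __module__, while __qualname__ (or the
# class object itself) would tell them apart.
#
#   outer = ClassNameCollisionSameModule
#   inner = ClassNameCollisionSameModule.ClassNameCollisionSameModule
#   assert outer.__module__  == inner.__module__     # hash inputs collide
#   assert outer.__qualname__ != inner.__qualname__  # would disambiguate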
Example #15
Source File: LearningMachine.py    From NeuronBlocks with MIT License
def __init__(self, phase, conf, problem, vocab_info=None, initialize=True, use_gpu=False, **kwargs):
        if initialize is True:
            assert vocab_info is not None
            self.model = Model(conf, problem, vocab_info, use_gpu)
            if use_gpu is True:
                self.model = nn.DataParallel(self.model)
                self.model = transfer_to_gpu(self.model)
            # judge the embedding matrix weight's device
            emb_weight_device = list(self.model.module.layers.embedding.embeddings.values())[0].weight.device.type if isinstance(self.model, nn.DataParallel) \
                else list(self.model.layers.embedding.embeddings.values())[0].weight.device.type
            device = 'GPU' if 'cuda' in emb_weight_device else 'CPU'
            logging.info(
                "The embedding matrix is on %s now, you can modify the weight_on_gpu parameter to change embeddings weight device." % device)
            logging.info("="*100 + '\n' + "*"*15 + "Model Achitecture" + "*"*15)
            logging.info(self.model)
            #logging.info("Total parameters: %d; trainable parameters: %d" % (get_param_num(self.model), get_trainable_param_num(self.model)))
            logging.info("Total trainable parameters: %d" % (get_trainable_param_num(self.model)))
            logging.info("Model built!")
        else:
            self.model = None

        self.conf = conf
        self.problem = problem
        self.phase = phase
        self.use_gpu = use_gpu

        # if it is a 2-class classification problem, figure out the real positive label
        # CAUTION: multi-class classification
        if phase != 'predict':
            if 'auc' in conf.metrics:
                if not hasattr(self.conf, 'pos_label') or self.conf.pos_label is None:
                    if problem.output_dict.cell_num() == 2 and \
                        problem.output_dict.has_cell("0") and problem.output_dict.has_cell("1"):
                        self.conf.pos_label = problem.output_dict.id("1")
                        logging.debug("Postive label (target index): %d" % self.conf.pos_label)
                    else:
                        # default
                        raise Exception('Please configure the positive label for auc metric at inputs/positive_label in the configuration file')
                else:
                    self.conf.pos_label = problem.output_dict.id(self.conf.pos_label)
            else:
                self.conf.pos_label = 1  # whatever

            self.metrics = conf.metrics
            if ProblemTypes[self.problem.problem_type] == ProblemTypes.classification \
                or ProblemTypes[self.problem.problem_type] == ProblemTypes.sequence_tagging:
                self.evaluator = Evaluator(metrics=self.metrics, pos_label=self.conf.pos_label, tagging_scheme=problem.tagging_scheme, label_indices=self.problem.output_dict.cell_id_map)
            elif ProblemTypes[self.problem.problem_type] == ProblemTypes.regression:
                self.evaluator = Evaluator(metrics=self.metrics, pos_label=self.conf.pos_label, tagging_scheme=problem.tagging_scheme, label_indices=None)
            elif ProblemTypes[self.problem.problem_type] == ProblemTypes.mrc:
                curr_mrc_metric = []
                for single_mrc_metric in self.metrics:
                    if 'mrc' in single_mrc_metric.lower():
                        curr_mrc_metric.append(single_mrc_metric.lower())
                    else:
                        curr_mrc_metric.append('mrc_' + single_mrc_metric.lower())
                self.evaluator = Evaluator(metrics=curr_mrc_metric, pos_label=self.conf.pos_label, tagging_scheme=problem.tagging_scheme, label_indices=None)

        self.best_test_result = "(No best test result yet)"