Python keras.models.model_from_yaml() Examples

The following are 28 code examples of keras.models.model_from_yaml(). Each example is taken from an open-source project; the source file and project are noted above it. You may also want to check out all available functions and classes of the module keras.models.
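Before the project examples, here is a minimal, self-contained sketch of the round trip: model.to_yaml() serializes only the architecture, so the weights must be saved and restored separately. This assumes an older Keras release in which model_from_yaml still exists; the file name weights.h5 is illustrative.

import numpy as np
from keras.models import Sequential, model_from_yaml
from keras.layers import Dense

model = Sequential([Dense(4, activation='relu', input_shape=(8,)),
                    Dense(1, activation='sigmoid')])

# Serialize the architecture to YAML; weights go to a separate HDF5 file.
yaml_string = model.to_yaml()
model.save_weights('weights.h5')

# Rebuild the model from YAML and reload its weights.
restored = model_from_yaml(yaml_string)
restored.load_weights('weights.h5')
assert np.allclose(model.predict(np.zeros((1, 8))),
                   restored.predict(np.zeros((1, 8))))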
Example #1
Source File: worker.py    From elephas with MIT License
def train(self, data_iterator):
        """Train a keras model on a worker
        """
        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss, metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        weights_before_training = self.model.get_weights()
        if x_train.shape[0] > self.train_config.get('batch_size'):
            self.model.fit(x_train, y_train, **self.train_config)
        weights_after_training = self.model.get_weights()
        deltas = subtract_params(
            weights_before_training, weights_after_training)
        yield deltas 
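subtract_params is imported from elephas's utilities and is not shown in this excerpt. A minimal sketch of what it computes, an element-wise difference of two lists of weight arrays, might look like the following; treat it as an illustration rather than the project's exact code.

def subtract_params(params_before, params_after):
    # Element-wise difference between two lists of NumPy weight arrays.
    return [before - after for before, after in zip(params_before, params_after)]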
Example #2
Source File: generic_utils.py    From artificial_neural_networks with Apache License 2.0
def load_keras_model(h5_file, json_file=None, yaml_file=None, is_weights=False, from_json=True):
    """
    Utility to load the whole model
    """
    # third-party imports
    from keras.models import load_model, model_from_json, model_from_yaml

    if is_weights:
        if from_json:
            with open(json_file, "r") as f:
                model = model_from_json(f.read())
        else:
            with open(yaml_file, "r") as f:
                model = model_from_yaml(f.read())
        model.load_weights(h5_file)
    else:
        model = load_model(h5_file)

    return model


# %% 
Example #3
Source File: utils.py    From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0
def load_model(model_path):
    custom_layers = {
        "multihead_attention": multihead_attention,
        "Conv2D": L.Conv2D,
        "split_heads_2d": split_heads_2d,
        "local_attention_2d": local_attention_2d,
        "combine_heads_2d": combine_heads_2d
    }
    with open(os.path.join(model_path, "arch.yaml")) as f:
        model = model_from_yaml(f.read(), custom_objects=custom_layers)

    full_path = os.path.join(model_path, "weights.h5")
    with h5py.File(full_path, "r") as w:
        keys = list(w.keys())
        is_para = any(["model" in k for k in keys])

    if is_para:
        para_model = multi_gpu_model(model, gpus=2)
        para_model.load_weights(full_path)
        model = para_model.layers[-2]
    else:
        model.load_weights(full_path)

    print("Model " + model_path + " loaded")
    return model 
Example #4
Source File: cat_dog.py    From deep_learning with MIT License
def pred_data():

    with open('./models/cat_dog.yaml') as yamlfile:
        loaded_model_yaml = yamlfile.read()
    model = model_from_yaml(loaded_model_yaml)
    model.load_weights('./models/cat_dog.h5')

    optimizer = Adam(lr=0.0003)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    images = []
    path='./data/test/'
    for f in os.listdir(path):
        img = image.load_img(path + f, target_size=image_size)
        img_array = image.img_to_array(img)

        x = np.expand_dims(img_array, axis=0)
        x = preprocess_input(x)
        result = model.predict_classes(x,verbose=0)

        print(f,result[0]) 
Example #5
Source File: recognition.py    From chinese-subtitle-ocr with MIT License
def __init__(self, model_file, weights_file, dictionary):
        self.logger = logging.getLogger(__name__)

        with open(model_file, "r") as file:
            self.model = model_from_yaml(file.read())
            height = self.model.inputs[0].shape[1]
            self.img_size = (height, height)
        self.model.load_weights(weights_file)

        with open(dictionary, "r") as file:
            self.dictionary = {}
            data = file.read().split("\n")
            for index, character in enumerate(data):
                self.dictionary[index] = character

        self.logger.debug("Loaded model") 
Example #6
Source File: server.py    From EMNIST with MIT License
def load_model(bin_dir):
    ''' Load model from .yaml and the weights from .h5

        Arguments:
            bin_dir: The directory of the bin (normally bin/)

        Returns:
            Loaded model from file
    '''

    # load YAML and create model
    with open('%s/model.yaml' % bin_dir, 'r') as yaml_file:
        loaded_model_yaml = yaml_file.read()
    model = model_from_yaml(loaded_model_yaml)

    # load weights into new model
    model.load_weights('%s/model.h5' % bin_dir)
    return model 
Example #7
Source File: SlotTaggingModel_multitask.py    From end2end_dialog with MIT License
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.maxlen_userUtter = np.int32(npzfile['maxlen_userUtter'][()])
        self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
        self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
        self.userIntent_vocab_size = np.int32(
            npzfile['userIntent_vocab_size'][()])
        self.id2userTag = npzfile['id2userTag'][()]
        self.id2word = npzfile['id2word'][()]
        self.id2userIntent = npzfile['id2userIntent'][()]
        self.userTag2id = npzfile['userTag2id'][()] 
Example #8
Source File: AgentActClassifyingModel.py    From end2end_dialog with MIT License
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.threshold is not None, 'Argument required: --threshold'
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        # load models
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.agentAct_vocab_size = np.int32(npzfile['agentAct_vocab_size'][()])
        self.userTagIntent_vocab_size = np.int32(npzfile['userTagIntent_vocab_size'][()])
        self.id2agentAct = npzfile['id2agentAct'][()]
        self.window_size = np.int32(npzfile['window_size'][()]) 
Example #9
Source File: hyperparam.py    From elephas with MIT License
def best_models(self, nb_models, model, data, max_evals):
        trials_list = self.compute_trials(model, data, max_evals)
        num_trials = sum(len(trials) for trials in trials_list)
        # Cap the number of requested models at the number of available trials.
        if num_trials < nb_models:
            nb_models = num_trials
        scores = []
        for trials in trials_list:
            scores = scores + [trial.get('result').get('loss')
                               for trial in trials]
        cut_off = sorted(scores, reverse=True)[nb_models - 1]
        model_list = []
        for trials in trials_list:
            for trial in trials:
                if trial.get('result').get('loss') >= cut_off:
                    model = model_from_yaml(trial.get('result').get('model'))
                    model.set_weights(pickle.loads(
                        trial.get('result').get('weights')))
                    model_list.append(model)
        return model_list 
Example #10
Source File: model.py    From rnnmorph with Apache License 2.0
def load_eval(self, config: BuildModelConfig, eval_model_config_path: str,
                  eval_model_weights_path: str) -> None:
        with open(eval_model_config_path, "r", encoding='utf-8') as f:
            if config.use_crf:
                from keras_contrib.layers import CRF
                custom_objects = {'ReversedLSTM': ReversedLSTM, 'CRF': CRF}
                self.eval_model = model_from_yaml(f.read(), custom_objects=custom_objects)
            else:
                custom_objects = {'ReversedLSTM': ReversedLSTM}
                self.eval_model = model_from_yaml(f.read(), custom_objects=custom_objects)
        self.eval_model.load_weights(eval_model_weights_path)
        self.eval_model._make_predict_function() 
Example #11
Source File: fcn.py    From lunania-ai with MIT License
def load(self, model_name):
        model_path = os.path.join(config.model_dir, model_name, self.ALL_IN_MODEL_FILE_NAME)
        logger.debug('model_path: %s', model_path)
        exists_all_in_one = os.path.exists(model_path)
        if exists_all_in_one:
            logger.debug('use all in one.')
            self.model = models.load_model(model_path, custom_objects=None)
        else:
            model_path = os.path.join(config.model_dir, model_name, self.MODEL_FILE_NAME)
            with open(model_path, 'r') as yaml_file:
                self.model = models.model_from_yaml(yaml_file.read(), custom_objects=None)
            self.model.load_weights(os.path.join(config.model_dir, model_name, self.WEIGHTS_FILE_NAME), by_name=True) 
Example #12
Source File: utils.py    From fetch with GNU General Public License v3.0
def get_model(model_idx):
    """

    :param model_idx: model string between a--j
    :type model_idx: str
    :return: Model
    """
    # Get the model from the folder
    logging.info(f'Getting model {model_idx}')
    path = os.path.split(__file__)[0]
    model_yaml = glob.glob(f'{path}/models/{model_idx}_FT*/*yaml')[0]

    # Read the model from the yaml
    with open(model_yaml, 'r') as y:
        model = model_from_yaml(y.read())

    # get the model weights, if not present download them.
    model_list = pd.read_csv(f'{path}/models/model_list.csv')
    model_index = string.ascii_lowercase.index(model_idx)

    weights = get_file(model_list['model'][model_index], PATH_TO_WEIGHTS + model_list['model'][model_index],
                       file_hash=model_list['hash'][model_index], cache_subdir='models', hash_algorithm='md5')

    # dump weights
    model.load_weights(weights)

    return model 
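A hypothetical call, assuming the packaged YAML files and the model_list.csv index are in place; the weights are downloaded on first use.

model = get_model('a')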
Example #13
Source File: ml_model.py    From elephas with MIT License
def _fit(self, df):
        """Private fit method of the Estimator, which trains the model.
        """
        simple_rdd = df_to_simple_rdd(df, categorical=self.get_categorical_labels(), nb_classes=self.get_nb_classes(),
                                      features_col=self.getFeaturesCol(), label_col=self.getLabelCol())
        simple_rdd = simple_rdd.repartition(self.get_num_workers())
        keras_model = model_from_yaml(self.get_keras_model_config())
        metrics = self.get_metrics()
        loss = self.get_loss()
        optimizer = get_optimizer(self.get_optimizer_config())
        keras_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        spark_model = SparkModel(model=keras_model,
                                 mode=self.get_mode(),
                                 frequency=self.get_frequency(),
                                 num_workers=self.get_num_workers())
        spark_model.fit(simple_rdd,
                        epochs=self.get_epochs(),
                        batch_size=self.get_batch_size(),
                        verbose=self.get_verbosity(),
                        validation_split=self.get_validation_split())

        model_weights = spark_model.master_network.get_weights()
        weights = simple_rdd.ctx.broadcast(model_weights)
        return ElephasTransformer(labelCol=self.getLabelCol(),
                                  outputCol='prediction',
                                  keras_model_config=spark_model.master_network.to_yaml(),
                                  weights=weights) 
Example #14
Source File: utils.py    From Vocal-Melody-Extraction with MIT License
def load_model(model_name):
    """


    """
    ext = '.yaml'
    model = model_from_yaml(open(model_name + ext).read())
    model.load_weights(model_name + '_weights.h5')

    print("model " + model_name + " loaded")
    return model 
Example #15
Source File: bot_vs_bot.py    From betago with MIT License
def load_keras_bot(bot_name):
    model_file = 'model_zoo/' + bot_name + '_bot.yml'
    weight_file = 'model_zoo/' + bot_name + '_weights.hd5'
    with open(model_file, 'r') as f:
        # The YAML text can be passed to model_from_yaml directly; parsing it
        # with yaml.load and re-dumping it is unnecessary.
        model = model_from_yaml(f.read())
        # Note that in Keras 1.0 we have to recompile the model explicitly
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
        model.load_weights(weight_file)
    processor = SevenPlaneProcessor()
    return KerasBot(model=model, processor=processor) 
Example #16
Source File: ensemble.py    From hyperas with MIT License
def voting_model_from_yaml(yaml_list, voting='hard', weights=None):
    # Build a list (in Python 3, map returns a one-shot iterator).
    model_list = [model_from_yaml(yml) for yml in yaml_list]
    return VotingModel(model_list, voting, weights) 
Example #17
Source File: char_embeddings_model.py    From rnnmorph with Apache License 2.0
def load(self, model_config_path: str, model_weights_path: str) -> None:
        with open(model_config_path, "r", encoding='utf-8') as f:
            self.model = model_from_yaml(f.read())
        self.model.load_weights(model_weights_path)
        self.char_layer = TimeDistributed(Model(self.model.input_layers[0].output, self.model.layers[-2].input)) 
Example #18
Source File: utils.py    From ml-hv-grid-pub with MIT License
def load_model(model_fpath, weights_fpath):
    """Load a model from yaml architecture and h5 weights."""
    assert model_fpath.endswith('.yaml')
    assert weights_fpath.endswith('.h5')

    with open(model_fpath, "r") as yaml_file:
        yaml_architecture = yaml_file.read()

    model = model_from_yaml(yaml_architecture)
    model.load_weights(weights_fpath)

    return model 
Example #19
Source File: model.py    From rnnmorph with Apache License 2.0
def load_train(self, config: BuildModelConfig, model_config_path: str=None, model_weights_path: str=None):
        with open(model_config_path, "r", encoding='utf-8') as f:
            if config.use_crf:
                from keras_contrib.layers import CRF
                custom_objects = {'ReversedLSTM': ReversedLSTM, 'CRF': CRF}
                self.train_model = model_from_yaml(f.read(), custom_objects=custom_objects)
            else:
                custom_objects = {'ReversedLSTM': ReversedLSTM}
                self.train_model = model_from_yaml(f.read(), custom_objects=custom_objects)
        self.train_model.load_weights(model_weights_path)

        loss = {}
        metrics = {}
        if config.use_crf:
            out_layer_name = 'crf'
            offset = 0
            if config.use_pos_lm:
                offset += 2
            if config.use_word_lm:
                offset += 2
            loss[out_layer_name] = self.train_model.layers[-1-offset].loss_function
            metrics[out_layer_name] = self.train_model.layers[-1-offset].accuracy
        else:
            out_layer_name = 'main_pred'
            loss[out_layer_name] = 'sparse_categorical_crossentropy'
            metrics[out_layer_name] = 'accuracy'

        if config.use_pos_lm:
            prev_layer_name = 'shifted_pred_prev'
            next_layer_name = 'shifted_pred_next'
            loss[prev_layer_name] = loss[next_layer_name] = 'sparse_categorical_crossentropy'
            metrics[prev_layer_name] = metrics[next_layer_name] = 'accuracy'
        self.train_model.compile(Adam(clipnorm=5.), loss=loss, metrics=metrics)

        self.eval_model = Model(inputs=self.train_model.inputs, outputs=self.train_model.outputs[0]) 
Example #20
Source File: face.py    From deep_learning with MIT License
def pred_data():

    with open('./models/face.yaml') as yamlfile:
        loaded_model_yaml = yamlfile.read()
    model = model_from_yaml(loaded_model_yaml)
    model.load_weights('./models/face_weight.h5')

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy',
                  metrics=['accuracy'])
    
    plot_model(model,to_file='./models/face_model.png') 
Example #21
Source File: ml_model.py    From elephas with MIT License
def get_model(self):
        return model_from_yaml(self.get_keras_model_config()) 
Example #22
Source File: textAnalysis.py    From deep_learning with MIT License
def get_model():
    # # Load the network architecture
    # with open('./models/text_lstm.yaml', 'r') as yaml_file:
    #     loaded_model_yaml = yaml_file.read()
    # model = model_from_yaml(loaded_model_yaml)
    # # Load the model weights
    # model.load_weights("./models/text_lstm.h5")
    # print("model Loaded")
    # model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
                  
    # utils.plot_model(model,to_file='./models/text_lstm_model.png')

    model = load_model("./models/text_lstm_full.h5")

    return model 
Example #23
Source File: ml_model.py    From elephas with MIT License
def _transform(self, df):
        """Private transform method of a Transformer. This serves as batch-prediction method for our purposes.
        """
        output_col = self.getOutputCol()
        label_col = self.getLabelCol()
        new_schema = copy.deepcopy(df.schema)
        new_schema.add(StructField(output_col, StringType(), True))

        rdd = df.rdd.coalesce(1)
        features = np.asarray(
            rdd.map(lambda x: from_vector(x.features)).collect())
        # Note that we collect, since executing this on the rdd would require model serialization once again
        model = model_from_yaml(self.get_keras_model_config())
        model.set_weights(self.weights.value)
        predictions = rdd.ctx.parallelize(
            model.predict_classes(features)).coalesce(1)
        # Wrap each prediction in a 1-tuple; tuple(str(x)) would split a
        # multi-digit prediction into individual characters.
        predictions = predictions.map(lambda x: (str(x),))

        results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
        results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
        results_df = results_df.withColumn(
            output_col, results_df[output_col].cast(DoubleType()))
        results_df = results_df.withColumn(
            label_col, results_df[label_col].cast(DoubleType()))

        return results_df 
Example #24
Source File: JointModel_multitask_jointraining.py    From end2end_dialog with MIT License
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.id2agentAct = npzfile['id2agentAct'][()]
        self.id2word = npzfile['id2word'][()]
        self.id2userTag = npzfile['id2userTag'][()]
        self.userTag2id = npzfile['userTag2id'][()]
        self.id2userIntent = npzfile['id2userIntent'][()]
        self.agentAct_vocab_size = np.int32(npzfile['agentAct_vocab_size'][()])
        self.userIntent_vocab_size = np.int32(npzfile['userIntent_vocab_size'][()])
        self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
        self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
        self.maxlen_userUtter = npzfile['maxlen_userUtter'][()]
        self.window_size = np.int32(npzfile['window_size'][()]) 
Example #25
Source File: utils.py    From ml-hv-grid-pub with MIT License
def save_model_yaml(model, model_fpath):
    """Save a Keras model's architecture (not its weights) as YAML."""
    with open(model_fpath, "w") as yaml_file:
        yaml_file.write(model.to_yaml())
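A hypothetical round trip pairing this helper with the load_model helper from the same project (Example #18 above); the file names are illustrative.

save_model_yaml(model, 'hv_grid.yaml')
model.save_weights('hv_grid.h5')
restored = load_model('hv_grid.yaml', 'hv_grid.h5')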
Example #26
Source File: keras_to_tensorflow.py    From Maix_Toolbox with Apache License 2.0
def load_model(input_model_path, input_json_path=None, input_yaml_path=None):
    if not Path(input_model_path).exists():
        raise FileNotFoundError(
            'Model file `{}` does not exist.'.format(input_model_path))
    try:
        model = keras.models.load_model(input_model_path)
        return model
    except FileNotFoundError as err:
        logging.error('Input model file (%s) does not exist.', FLAGS.input_model)
        raise err
    except ValueError as wrong_file_err:
        if input_json_path:
            if not Path(input_json_path).exists():
                raise FileNotFoundError(
                    'Model description json file `{}` does not exist.'.format(
                        input_json_path))
            try:
                with open(str(input_json_path)) as f:
                    model = model_from_json(f.read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from json.")
                raise err
        elif input_yaml_path:
            if not Path(input_yaml_path).exists():
                raise FileNotFoundError(
                    'Model description yaml file `{}` does not exist.'.format(
                        input_yaml_path))
            try:
                with open(str(input_yaml_path)) as f:
                    model = model_from_yaml(f.read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from yaml.")
                raise err
        else:
            logging.error(
                'Input file specified only holds the weights, and not '
                'the model definition. Save the model using '
                'model.save(filename.h5) which will contain the network '
                'architecture as well as its weights. '
                'If the model is saved using the '
                'model.save_weights(filename) function, either '
                'input_model_json or input_model_yaml flags should be set '
                'to import the network architecture prior to loading the '
                'weights. \n'
                'Check the keras documentation for more details '
                '(https://keras.io/getting-started/faq/)')
            raise wrong_file_err 
Example #27
Source File: worker.py    From elephas with MIT License
def train(self, data_iterator):
        """Train a keras model on a worker and send asynchronous updates
        to parameter server
        """
        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        if x_train.size == 0:
            return

        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss, metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        epochs = self.train_config['epochs']
        batch_size = self.train_config.get('batch_size')
        nb_train_sample = x_train.shape[0]
        nb_batch = int(np.ceil(nb_train_sample / float(batch_size)))
        index_array = np.arange(nb_train_sample)
        batches = [
            (i * batch_size, min(nb_train_sample, (i + 1) * batch_size))
            for i in range(0, nb_batch)
        ]

        if self.frequency == 'epoch':
            for epoch in range(epochs):
                weights_before_training = self.client.get_parameters()
                self.model.set_weights(weights_before_training)
                self.train_config['epochs'] = 1
                if x_train.shape[0] > batch_size:
                    self.model.fit(x_train, y_train, **self.train_config)
                self.train_config['epochs'] = epochs
                weights_after_training = self.model.get_weights()
                deltas = subtract_params(
                    weights_before_training, weights_after_training)
                self.client.update_parameters(deltas)
        elif self.frequency == 'batch':
            for epoch in range(epochs):
                if x_train.shape[0] > batch_size:
                    for (batch_start, batch_end) in batches:
                        weights_before_training = self.client.get_parameters()
                        self.model.set_weights(weights_before_training)
                        batch_ids = index_array[batch_start:batch_end]
                        x = slice_arrays(x_train, batch_ids)
                        y = slice_arrays(y_train, batch_ids)
                        self.model.train_on_batch(x, y)
                        weights_after_training = self.model.get_weights()
                        deltas = subtract_params(
                            weights_before_training, weights_after_training)
                        self.client.update_parameters(deltas)
        else:
            raise ValueError(
                'frequency parameter can be `epoch` or `batch`, got {}'.format(self.frequency))
        yield [] 
Example #28
Source File: keras_to_tensorflow.py    From keras_to_tensorflow with MIT License
def load_model(input_model_path, input_json_path=None, input_yaml_path=None):
    if not Path(input_model_path).exists():
        raise FileNotFoundError(
            'Model file `{}` does not exist.'.format(input_model_path))
    try:
        model = keras.models.load_model(input_model_path)
        return model
    except FileNotFoundError as err:
        logging.error('Input model file (%s) does not exist.', FLAGS.input_model)
        raise err
    except ValueError as wrong_file_err:
        if input_json_path:
            if not Path(input_json_path).exists():
                raise FileNotFoundError(
                    'Model description json file `{}` does not exist.'.format(
                        input_json_path))
            try:
                with open(str(input_json_path)) as f:
                    model = model_from_json(f.read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from json.")
                raise err
        elif input_yaml_path:
            if not Path(input_yaml_path).exists():
                raise FileNotFoundError(
                    'Model description yaml file `{}` does not exist.'.format(
                        input_yaml_path))
            try:
                with open(str(input_yaml_path)) as f:
                    model = model_from_yaml(f.read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from yaml.")
                raise err
        else:
            logging.error(
                'Input file specified only holds the weights, and not '
                'the model definition. Save the model using '
                'model.save(filename.h5) which will contain the network '
                'architecture as well as its weights. '
                'If the model is saved using the '
                'model.save_weights(filename) function, either '
                'input_model_json or input_model_yaml flags should be set '
                'to import the network architecture prior to loading the '
                'weights. \n'
                'Check the keras documentation for more details '
                '(https://keras.io/getting-started/faq/)')
            raise wrong_file_err
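A closing caveat: model_from_yaml was removed from the Keras bundled with TensorFlow 2.6 and later, because deserializing YAML can execute arbitrary code; calling it there raises a RuntimeError. The JSON equivalents remain available, so a conservative port of the patterns above might look like this (file names are illustrative):

from tensorflow.keras.models import model_from_json

with open('model.json', 'r') as f:
    model = model_from_json(f.read())
model.load_weights('model.h5')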