Python keras.models.load_model() Examples
The following are 30 code examples of keras.models.load_model(), gathered from open-source projects. Each example lists its source file, project, and license above the code. You may also want to check out the other functions and classes of the keras.models module.
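Before the project examples, here is a minimal save/load round trip showing what load_model() restores. This is a sketch with an illustrative file name, not code from any of the projects below:

from keras.models import Sequential, load_model
from keras.layers import Dense

# Build and compile a tiny model, then persist it to HDF5.
model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
model.save('my_model.h5')             # writes architecture, weights and optimizer state

# Later (even in a fresh process), rebuild the identical, already-compiled model.
restored = load_model('my_model.h5')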
Example #1
Source File: yolo.py From Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow with MIT License | 6 votes |
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'

    self.yolo_model = load_model(model_path, compile=False)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
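Example #1 above (like the other YOLO generate() methods further down) passes compile=False, which restores only the architecture and weights and skips the saved loss/optimizer configuration. That is all inference needs. A minimal sketch with an illustrative file name:

from keras.models import load_model

# compile=False: no training configuration is restored.
model = load_model('yolo.h5', compile=False)
# model.predict(batch) works immediately; model.fit()/evaluate() would first
# require a fresh model.compile(...) call.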
Example #2
Source File: localizer.py From cnn-levelset with MIT License | 6 votes |
def __init__(self, model_path=None):
    if model_path is not None:
        self.model = self.load_model(model_path)
    else:
        # VGG16 last conv features
        inputs = Input(shape=(7, 7, 512))
        x = Convolution2D(128, 1, 1)(inputs)
        x = Flatten()(x)

        # Cls head
        h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_cls = Dropout(p=0.5)(h_cls)
        cls_head = Dense(20, activation='softmax', name='cls')(h_cls)

        # Reg head
        h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_reg = Dropout(p=0.5)(h_reg)
        reg_head = Dense(4, activation='linear', name='reg')(h_reg)

        # Joint model
        self.model = Model(input=inputs, output=[cls_head, reg_head])
Example #3
Source File: test.py From Sound-Recognition-Tutorial with Apache License 2.0 | 6 votes |
def CNN_test(test_fold, feat):
    """
    Test model using test set
    :param test_fold: test fold of 5-fold cross validation
    :param feat: which feature to use
    """
    # Load the test data
    _, _, test_features, test_labels = esc10_input.get_data(test_fold, feat)

    # Load the trained model
    model = load_model('./saved_model/cnn_{}_fold{}.h5'.format(feat, test_fold))

    # Report the trained model's performance on the test set
    score = model.evaluate(test_features, test_labels)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    return score[1]
Example #4
Source File: imagenet.py From vergeml with MIT License | 6 votes |
def load(self, model_dir, architecture, image_size):
    from keras.models import load_model
    from vergeml.sources.features import get_preprocess_input

    labels_txt = os.path.join(model_dir, "labels.txt")
    if not os.path.exists(labels_txt):
        raise VergeMLError("labels.txt not found: {}".format(labels_txt))

    model_h5 = os.path.join(model_dir, "model.h5")
    if not os.path.exists(model_h5):
        raise VergeMLError("model.h5 not found: {}".format(model_h5))

    with open(labels_txt, "r") as f:
        self.labels = f.read().splitlines()

    self.model = load_model(model_h5)
    self.image_size = image_size
    self.preprocess_input = get_preprocess_input(architecture)
Example #5
Source File: features.py From vergeml with MIT License | 6 votes |
def get_custom_architecture(name, trainings_dir, output_layer):
    from keras.models import load_model, Model

    name = name.lstrip("@")
    model = load_model(os.path.join(trainings_dir, name, 'checkpoints', 'model.h5'))

    try:
        if isinstance(output_layer, int):
            layer = model.layers[output_layer]
        else:
            layer = model.get_layer(output_layer)
    except Exception:
        if isinstance(output_layer, int):
            raise VergeMLError(f'output-layer {output_layer} not found - '
                               f'model has only {len(model.layers)} layers.')
        else:
            candidates = list(map(lambda l: l.name, model.layers))
            raise VergeMLError(f'output-layer named {output_layer} not found.',
                               suggestion=did_you_mean(candidates, output_layer))

    model = Model(inputs=model.input, outputs=layer.output)
    return model
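Example #5 above turns a loaded checkpoint into a feature extractor by rebuilding a Model that stops at a chosen layer. The same pattern in isolation, as a sketch; the path and layer name are illustrative and not taken from the vergeml project:

from keras.models import load_model, Model

base = load_model('checkpoints/model.h5')        # full trained model (hypothetical path)
feature_layer = base.get_layer('block5_pool')    # pick any intermediate layer by name
extractor = Model(inputs=base.input, outputs=feature_layer.output)

# features = extractor.predict(images)           # yields that layer's activations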
Example #6
Source File: yolo.py From multi-object-tracking with GNU General Public License v3.0 | 6 votes |
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'

    self.yolo_model = load_model(model_path, compile=False)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
Example #7
Source File: baseline.py From MELD with GNU General Public License v3.0 | 6 votes |
def test_model(self):
    model = load_model(self.PATH)
    intermediate_layer_model = Model(input=model.input,
                                     output=model.get_layer("utter").output)

    intermediate_output_train = intermediate_layer_model.predict(self.train_x)
    intermediate_output_val = intermediate_layer_model.predict(self.val_x)
    intermediate_output_test = intermediate_layer_model.predict(self.test_x)

    train_emb, val_emb, test_emb = {}, {}, {}
    for idx, ID in enumerate(self.train_id):
        train_emb[ID] = intermediate_output_train[idx]
    for idx, ID in enumerate(self.val_id):
        val_emb[ID] = intermediate_output_val[idx]
    for idx, ID in enumerate(self.test_id):
        test_emb[ID] = intermediate_output_test[idx]
    pickle.dump([train_emb, val_emb, test_emb], open(self.OUTPUT_PATH, "wb"))

    self.calc_test_result(model.predict(self.test_x), self.test_y, self.test_mask)
Example #8
Source File: grad_cam.py From face_classification with MIT License | 6 votes |
def modify_backprop(model, name, task):
    graph = tf.get_default_graph()
    with graph.gradient_override_map({'Relu': name}):

        # get layers that have an activation
        activation_layers = [layer for layer in model.layers
                             if hasattr(layer, 'activation')]

        # replace relu activation
        for layer in activation_layers:
            if layer.activation == keras.activations.relu:
                layer.activation = tf.nn.relu

        # re-instantiate a new model
        if task == 'gender':
            model_path = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'
        elif task == 'emotion':
            model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        # model_path = '../trained_models/fer2013_mini_XCEPTION.119-0.65.hdf5'
        # model_path = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
        new_model = load_model(model_path, compile=False)
    return new_model
Example #9
Source File: reinforcement_algorithm.py From evo-pawness with GNU General Public License v3.0 | 6 votes |
def __init__(self, init_state, max_simulation=AlphaZeroConfig.MAX_SIMULATION_AGENT,
             MODEL_PATH=AlphaZeroConfig.DEFAULT_MODEL_AGENT):
    """
    Constructor of AlphaZero Agent
    :param init_state: the initial state of the game. It will be used continuously
    :param max_simulation: MCTS max simulation
    :param MODEL_PATH: Model Path used for AlphaZero Agent
    """
    self.max_simulation = max_simulation
    all_action_spaces = action_spaces_new()
    self.ae = ActionEncoder()
    self.ae.fit(list_all_action=all_action_spaces)
    self.stacked_state = StackedState(init_state)
    self.deepnet_model = PawnNetZero(len(all_action_spaces))
    self.deepnet_model.model = load_model(MODEL_PATH)
    self.mcts = MCTreeSearch(self.deepnet_model.model, 1, self.max_simulation,
                             self.ae, self.stacked_state)
Example #10
Source File: classifier.py From shopping-classification with Apache License 2.0 | 6 votes |
def predict(self, data_root, model_root, test_root, test_div, out_path, readable=False):
    meta_path = os.path.join(data_root, 'meta')
    meta = cPickle.loads(open(meta_path, 'rb').read())

    model_fname = os.path.join(model_root, 'model.h5')
    self.logger.info('# of classes(train): %s' % len(meta['y_vocab']))
    model = load_model(model_fname,
                       custom_objects={'top1_acc': top1_acc})

    test_path = os.path.join(test_root, 'data.h5py')
    test_data = h5py.File(test_path, 'r')
    test = test_data[test_div]
    batch_size = opt.batch_size
    pred_y = []
    test_gen = ThreadsafeIter(self.get_sample_generator(test, batch_size,
                                                        raise_stop_event=True))
    total_test_samples = test['uni'].shape[0]
    with tqdm.tqdm(total=total_test_samples) as pbar:
        for chunk in test_gen:
            total_test_samples = test['uni'].shape[0]
            X, _ = chunk
            _pred_y = model.predict(X)
            pred_y.extend([np.argmax(y) for y in _pred_y])
            pbar.update(X[0].shape[0])

    self.write_prediction_result(test, pred_y, meta, out_path, readable=readable)
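Example #10 passes custom_objects so load_model() can resolve the custom top1_acc metric the saved model was compiled with. As a general sketch of the pattern, with an illustrative file name and a made-up metric body (the project's real top1_acc may differ): any custom loss, metric, or layer referenced by the saved model must be supplied the same way.

import keras.backend as K
from keras.models import load_model

def top1_acc(y_true, y_pred):
    # Hypothetical re-definition of the custom metric the model was compiled with.
    return K.mean(K.cast(K.equal(K.argmax(y_true, axis=-1),
                                 K.argmax(y_pred, axis=-1)), K.floatx()))

# Without custom_objects, load_model raises an error about the unknown metric
# name stored in the HDF5 file.
model = load_model('model.h5', custom_objects={'top1_acc': top1_acc})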
Example #11
Source File: grad_cam.py From Emotion with MIT License | 6 votes |
def modify_backprop(model, name, task):
    graph = tf.get_default_graph()
    with graph.gradient_override_map({'Relu': name}):

        # get layers that have an activation
        activation_layers = [layer for layer in model.layers
                             if hasattr(layer, 'activation')]

        # replace relu activation
        for layer in activation_layers:
            if layer.activation == keras.activations.relu:
                layer.activation = tf.nn.relu

        # re-instantiate a new model
        if task == 'gender':
            model_path = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'
        elif task == 'emotion':
            model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        # model_path = '../trained_models/fer2013_mini_XCEPTION.119-0.65.hdf5'
        # model_path = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
        new_model = load_model(model_path, compile=False)
    return new_model
Example #12
Source File: grad_cam.py From Face-and-Emotion-Recognition with MIT License | 6 votes |
def modify_backprop(model, name, task):
    graph = tf.get_default_graph()
    with graph.gradient_override_map({'Relu': name}):

        # get layers that have an activation
        activation_layers = [layer for layer in model.layers
                             if hasattr(layer, 'activation')]

        # replace relu activation
        for layer in activation_layers:
            if layer.activation == keras.activations.relu:
                layer.activation = tf.nn.relu

        # re-instantiate a new model
        if task == 'gender':
            model_path = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'
        elif task == 'emotion':
            model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        # model_path = '../trained_models/fer2013_mini_XCEPTION.119-0.65.hdf5'
        # model_path = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
        new_model = load_model(model_path, compile=False)
    return new_model
Example #13
Source File: yolo.py From YOLO-3D-Box with MIT License | 6 votes |
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'

    self.yolo_model = load_model(model_path, compile=False)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
Example #14
Source File: yolo.py From deep_sort_yolov3 with GNU General Public License v3.0 | 6 votes |
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'

    self.yolo_model = load_model(model_path, compile=False)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
Example #15
Source File: pspnet.py From PSPNet-Keras-tensorflow with MIT License | 6 votes |
def __init__(self, nb_classes, resnet_layers, input_shape, weights):
    self.input_shape = input_shape
    self.num_classes = nb_classes

    json_path = join("weights", "keras", weights + ".json")
    h5_path = join("weights", "keras", weights + ".h5")
    if 'pspnet' in weights:
        if os.path.isfile(json_path) and os.path.isfile(h5_path):
            print("Keras model & weights found, loading...")
            with CustomObjectScope({'Interp': layers.Interp}):
                with open(json_path) as file_handle:
                    self.model = model_from_json(file_handle.read())
            self.model.load_weights(h5_path)
        else:
            print("No Keras model & weights found, import from npy weights.")
            self.model = layers.build_pspnet(nb_classes=nb_classes,
                                             resnet_layers=resnet_layers,
                                             input_shape=self.input_shape)
            self.set_npy_weights(weights)
    else:
        print('Load pre-trained weights')
        self.model = load_model(weights)
Example #16
Source File: load_trained_models.py From Coloring-greyscale-images with MIT License | 6 votes |
def __init__(self, resource_path='./resources/', learning_rate=0.0002, decay_rate=2e-6, gpus=1):
    self.gpus = gpus
    self.learning_rate = learning_rate
    self.decay_rate = decay_rate

    def zero_loss(y_true, y_pred):
        return K.zeros_like(y_true)

    discriminator_full = load_model(resource_path + 'discriminator_full.h5',
                                    custom_objects={'Conv2D_r': Conv2D_r,
                                                    'InstanceNormalization': InstanceNormalization,
                                                    'tf': tf,
                                                    'zero_loss': zero_loss,
                                                    'ConvSN2D': ConvSN2D,
                                                    'DenseSN': DenseSN})
    discriminator_full.trainable = True
    discriminator_full.name = "discriminator_full"

    self.model = discriminator_full
    self.save_model = discriminator_full
Example #17
Source File: load_trained_models.py From Coloring-greyscale-images with MIT License | 6 votes |
def __init__(self, resource_path='./resources/', learning_rate=0.0002, decay_rate=2e-6, gpus=0):
    self.gpus = gpus
    self.learning_rate = learning_rate
    self.decay_rate = decay_rate

    def zero_loss(y_true, y_pred):
        return K.zeros_like(y_true)

    discriminator_low = load_model(resource_path + 'discriminator_low.h5',
                                   custom_objects={'Conv2D_r': Conv2D_r,
                                                   'InstanceNormalization': InstanceNormalization,
                                                   'tf': tf,
                                                   'zero_loss': zero_loss,
                                                   'ConvSN2D': ConvSN2D,
                                                   'DenseSN': DenseSN})
    discriminator_low.trainable = True
    discriminator_low.name = "discriminator_low"

    self.model = discriminator_low
    self.save_model = discriminator_low
Example #18
Source File: model.py From models with MIT License | 5 votes |
def __init__(self, model_file):
    self.model_file = model_file
    K.clear_session()  # restart session
    self.model = load_model(model_file, compile=False)
    self.contrib_fns = {}
Example #19
Source File: core.py From pdftotree with MIT License | 5 votes |
def load_model(model_type, model_path):
    log = logging.getLogger(__name__)
    log.info("Loading pretrained {} model for table detection".format(model_type))
    if model_type == "ml":
        model = pickle.load(open(model_path, "rb"))
    else:
        from keras.models import load_model as load_vision_model

        model = load_vision_model(model_path)
    log.info("Model loaded!")
    return model
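Example #19 defines its own function named load_model, so it imports the Keras loader under an alias to avoid shadowing itself. A stripped-down sketch of the same trick (the wrapper body is illustrative):

from keras.models import load_model as load_keras_model

def load_model(model_path):
    # Project-level wrapper (illustrative); delegates to the aliased Keras loader.
    return load_keras_model(model_path)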
Example #20
Source File: mark_detector.py From face_landmark_dnn with MIT License | 5 votes |
def __init__(self, mark_model=current_model):
    """Initialization"""
    # A face detector is required for mark detection.
    self.face_detector = FaceDetector()

    self.marks = None

    if mark_model.split(".")[1] == "pb":
        # Get a TensorFlow session ready to do landmark detection
        # Load a (frozen) Tensorflow model into memory.
        self.cnn_input_size = 64
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(mark_model, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.graph = detection_graph
        self.sess = tf.Session(graph=detection_graph)
    else:
        self.cnn_input_size = 64
        # with CustomObjectScope({'tf': tf}):
        with custom_object_scope({'smoothL1': smoothL1, 'relu6': relu6,
                                  'DepthwiseConv2D': DepthwiseConv2D,
                                  'mask_weights': mask_weights, 'tf': tf}):
            self.sess = load_model(mark_model)
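Examples #20 and #23 wrap the call in a custom-object scope instead of passing custom_objects directly; both routes register the names the saved model refers to. A minimal sketch with a made-up custom activation and file name, assuming Keras 2's CustomObjectScope:

import keras.backend as K
from keras.models import load_model
from keras.utils import CustomObjectScope

def relu6(x):
    # Hypothetical custom activation the saved model references by name.
    return K.relu(x, max_value=6)

with CustomObjectScope({'relu6': relu6}):
    model = load_model('landmark_model.h5')   # hypothetical file name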
Example #21
Source File: keras-tensorflow.py From DeepLearning-IDS with MIT License | 5 votes |
def load_model_csv(_model_name):
    # Change to your own path
    model = load_model('results_keras_tensorflow/models/{}'.format(_model_name))
    return model
Example #22
Source File: neural_network.py From CalibrationNN with GNU General Public License v3.0 | 5 votes |
def __fromfile(self):
    file_name = self.file_name() + '.h5'
    if isfile(file_name):
        self.model = load_model(file_name)
    else:
        self.model = None
Example #23
Source File: keras_to_coreml.py From face_landmark_dnn with MIT License | 5 votes |
def keras_to_coreml():
    with custom_object_scope({'smoothL1': smoothL1, 'relu6': relu6,
                              'DepthwiseConv2D': mobilenet.DepthwiseConv2D}):
        ml_model = load_model(MODEL_PATH)
        coreml_model = coremltools.converters.keras.convert(ml_model,
                                                            input_names='image',
                                                            image_input_names='image',
                                                            is_bgr=False)
    coreml_model.save(ML_MODEL_PATH)
Example #24
Source File: keras_serializer.py From marvin-python-toolbox with Apache License 2.0 | 5 votes |
def _serializer_load(self, object_file_path):
    if object_file_path.split(os.sep)[-1] == 'model':
        from keras.models import load_model
        logger.debug("Loading model {} using keras serializer.".format(object_file_path))
        return load_model(object_file_path)
    else:
        return super(KerasSerializer, self)._serializer_load(object_file_path)
Example #25
Source File: Stock_Prediction_Model_Stateless_LSTM.py From StockRecommendSystem with MIT License | 5 votes |
def load_training_model(self, window_len):
    # https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model
    model_file = self.paras.model_folder + self.get_model_name(window_len) + '.h5'
    if os.path.exists(model_file):
        print('load LSTM model...')
        return load_model(model_file)  # reloads the model from its HDF5 file
    return None
Example #26
Source File: emotion_predictor.py From twitter-emotion-recognition with GNU Affero General Public License v3.0 | 5 votes |
def _get_model(self):
    self._loaded_model_filename = 'models/{}{}-{}.h5'.format(
        'unison-' if self.use_unison_model else '',
        self.classification,
        self.setting,
    )
    return load_model(self._loaded_model_filename)
Example #27
Source File: model.py From models with MIT License | 5 votes |
def __init__(self, weights):
    self.nuc_dict = {'A': [1.0, 0.0, 0.0, 0.0], 'C': [0.0, 1.0, 0.0, 0.0],
                     'G': [0.0, 0.0, 1.0, 0.0], 'U': [0.0, 0.0, 0.0, 1.0],
                     'T': [0.0, 0.0, 0.0, 1.0], 'N': [0.0, 0.0, 0.0, 0.0],
                     'X': [1/4, 1/4, 1/4, 1/4]}
    self.weights = weights
    self.model = load_model(weights, custom_objects={'FrameSliceLayer': FrameSliceLayer})

# One-hot encodes a particular sequence
Example #28
Source File: train.py From YOLO-3D-Box with MIT License | 5 votes |
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=True):
    '''create the training model'''
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)//3
    y_true = [Input(shape=(h//32, w//32, num_anchors, num_classes+5)),
              Input(shape=(h//16, w//16, num_anchors, num_classes+5)),
              Input(shape=(h//8, w//8, num_anchors, num_classes+5))]

    model_body = yolo_body(image_input, num_anchors, num_classes)

    if load_pretrained:
        weights_path = os.path.join('model_data', 'yolo_weights.h5')
        if not os.path.exists(weights_path):
            print("CREATING WEIGHTS FILE " + weights_path)
            yolo_path = os.path.join('model_data', 'yolo.h5')
            orig_model = load_model(yolo_path, compile=False)
            orig_model.save_weights(weights_path)
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        if freeze_body:
            # Do not freeze 3 output layers.
            for i in range(len(model_body.layers)-3):
                model_body.layers[i].trainable = False

    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors, 'num_classes': num_classes})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    return model_body, model
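Example #28 above loads the full pretrained YOLO model once, writes out only its weights, and then pulls those weights into a freshly built network layer-by-layer by name, skipping mismatches. The same two-step pattern in isolation (file names illustrative, new_model is a hypothetical freshly built network):

from keras.models import load_model

# One-time conversion: full saved model -> weights-only HDF5.
full_model = load_model('model_data/yolo.h5', compile=False)
full_model.save_weights('model_data/yolo_weights.h5')

# Later, transfer into a new architecture; layers whose names or shapes
# do not match are silently skipped.
# new_model.load_weights('model_data/yolo_weights.h5', by_name=True, skip_mismatch=True)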
Example #29
Source File: train_ke.py From imgclsmob with MIT License | 5 votes |
def prepare_trainer(net, optimizer_name, momentum, lr, num_gpus, state_file_path=None):
    optimizer_name = optimizer_name.lower()
    if (optimizer_name == "sgd") or (optimizer_name == "nag"):
        optimizer = keras.optimizers.SGD(
            lr=lr,
            momentum=momentum,
            nesterov=(optimizer_name == "nag"))
    else:
        raise ValueError("Unsupported optimizer: {}".format(optimizer_name))

    backend_agnostic_compile(
        model=net,
        loss="categorical_crossentropy",
        optimizer=optimizer,
        metrics=[keras.metrics.categorical_accuracy,
                 keras.metrics.top_k_categorical_accuracy],
        num_gpus=num_gpus)

    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        net = load_model(filepath=state_file_path)

    return net
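In Example #29, the freshly compiled net is replaced by the load_model() result whenever a state file exists; because the default model.save() also stores the optimizer state, training can resume where the checkpoint left off. A hedged sketch of that resume logic, with an illustrative checkpoint path:

import os
from keras.models import load_model

state_file = 'checkpoints/last_state.h5'   # hypothetical checkpoint path
if os.path.exists(state_file):
    net = load_model(state_file)           # restores weights *and* optimizer state
# else: build and compile a fresh model before calling fit()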
Example #30
Source File: keras-theano.py From DeepLearning-IDS with MIT License | 5 votes |
def load_model_csv(_model_name):
    # Change to your own path
    model = load_model(
        'results_keras_theano/models/{}'.format(_model_name))
    return model