Python keras.utils.vis_utils.plot_model() Examples
The following are 8 code examples of keras.utils.vis_utils.plot_model(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.utils.vis_utils, or try the search function.
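Before working through the examples, here is a minimal sketch of the call they all revolve around. The toy model and the output file name toy_model.png are illustrative choices, and the snippet assumes the pydot and graphviz dependencies that plot_model needs for rendering are installed.

from keras.models import Sequential
from keras.layers import Dense
from keras.utils.vis_utils import plot_model

# Build a small throwaway model just to have something to visualize.
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(16,)))
model.add(Dense(1, activation='sigmoid'))

# Render the layer graph to an image file; show_shapes annotates each box with
# input/output shapes and show_layer_names labels the boxes with layer names.
plot_model(model, to_file='toy_model.png', show_shapes=True, show_layer_names=True)

The examples below embed this same call in larger workflows: dumping a diagram right after a model is built, saving it alongside trained weights, or exercising plot_model inside a unit test.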
Example #1
Source File: helper_functions.py From fast-neural-style-keras with MIT License | 6 votes |
def predict(options, img_read_path, img_write_path):
    # Read image
    content = process_image(img_read_path, -1, -1, resize=False)
    ori_height = content.shape[1]
    ori_width = content.shape[2]

    # Pad image
    content = get_padding(content)
    height = content.shape[1]
    width = content.shape[2]

    # Get eval model
    eval_model = get_evaluate_model(width, height)
    eval_model.load_weights(options['weights_read_path'])

    # If flag is set, print model summary and generate model description
    if options["plot_model"]:
        eval_model.summary()
        plot_model(eval_model, to_file='model.png')

    # Generate output and save image
    res = eval_model.predict([content])
    output = deprocess_image(res[0], width, height)
    output = remove_padding(output, ori_height, ori_width)
    imwrite(img_write_path, output)
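A call to this helper might look like the sketch below; the two options keys ('weights_read_path' and 'plot_model') come from the snippet above, while the concrete file paths are hypothetical placeholders.

# Hypothetical invocation of the predict() helper above; paths are placeholders.
options = {
    'weights_read_path': 'weights/style.h5',  # trained style-transfer weights
    'plot_model': True,                        # also write model.png via plot_model
}
predict(options, 'content.jpg', 'stylized.jpg')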
Example #2
Source File: fcn.py From lunania-ai with MIT License | 6 votes |
def train(self, dataset, epoches):
    sample_count, generator = createDataGenerater(dataset)
    history = self.model.fit_generator(
        generator(),
        steps_per_epoch=sample_count,
        epochs=epoches,
        verbose=1
    )
    save_dir = join_path(config.model_dir,
                         self.model_type + '-' + str(config.classes) + 'class-'
                         + str(self.epoch) + 'epoch-'
                         + datetime.now().strftime("%Y%m%d%H%M%S"))
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    logger.info('model path: %s', save_dir)
    with open(os.path.join(save_dir, self.MODEL_FILE_NAME), mode='w', encoding='utf-8') as model_file:
        model_file.write(self.model.to_yaml())
    vis_utils.plot_model(self.model,
                         to_file=os.path.join(save_dir, self.VISUALIZED_MODEL_FILE_NAME),
                         show_shapes=True)
    self.model.save_weights(os.path.join(save_dir, self.WEIGHTS_FILE_NAME))
    self.model.save(os.path.join(save_dir, self.ALL_IN_MODEL_FILE_NAME))
Example #3
Source File: keras_utils.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 5 votes |
def save_model_figure(model, file_path='/.model.eps'):
    vis_utils.plot_model(model, file_path, show_shapes=True, show_layer_names=True)
Example #4
Source File: keras_utils.py From timeception with GNU General Public License v3.0 | 5 votes |
def save_model_figure(model, file_path='/.model.eps'):
    vis_utils.plot_model(model, file_path, show_shapes=True, show_layer_names=True)
Example #5
Source File: classification_bigbench_keras.py From deepsim with MIT License | 5 votes |
def model_summary():
    X_left = Input((dim, dim, bin_vec_dim))
    X_right = Input((dim, dim, bin_vec_dim))
    predictions = classification(X_left, X_right)
    model = Model(inputs=[X_left, X_right], outputs=predictions)
    model.compile(optimizer=K.optimizers.adam(lr=0.0005),
                  loss=K.losses.binary_crossentropy,
                  metrics=['accuracy'])
    # plot_model(model, to_file='./result/plot/whole_model.png', show_shapes=True)
Example #6
Source File: vis_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_plot_model():
    model = Sequential()
    model.add(Conv2D(filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
    model.add(Flatten(name='flat'))
    model.add(Dense(5, name='dense1'))
    vis_utils.plot_model(model, to_file='model1.png', show_layer_names=False)
    os.remove('model1.png')

    model = Sequential()
    model.add(LSTM(16, return_sequences=True, input_shape=(2, 3), name='lstm'))
    model.add(TimeDistributed(Dense(5, name='dense2')))
    vis_utils.plot_model(model, to_file='model2.png', show_shapes=True)
    os.remove('model2.png')
Example #7
Source File: keras_utils.py From videograph with GNU General Public License v3.0 | 5 votes |
def plot_model(model, file_path='model.eps'):
    vis_utils.plot_model(model, file_path, show_shapes=True, show_layer_names=True)
Example #8
Source File: mobilenet_v2.py From MobileNetV2 with MIT License | 4 votes |
def MobileNetv2(input_shape, k, alpha=1.0):
    """MobileNetv2

    This function defines a MobileNetv2 architecture.

    # Arguments
        input_shape: An integer or tuple/list of 3 integers, shape of input tensor.
        k: Integer, number of classes.
        alpha: Float, width multiplier, typically one of [0.35, 0.50, 0.75, 1.0, 1.3, 1.4].

    # Returns
        MobileNetv2 model.
    """
    inputs = Input(shape=input_shape)

    first_filters = _make_divisible(32 * alpha, 8)
    x = _conv_block(inputs, first_filters, (3, 3), strides=(2, 2))

    x = _inverted_residual_block(x, 16, (3, 3), t=1, alpha=alpha, strides=1, n=1)
    x = _inverted_residual_block(x, 24, (3, 3), t=6, alpha=alpha, strides=2, n=2)
    x = _inverted_residual_block(x, 32, (3, 3), t=6, alpha=alpha, strides=2, n=3)
    x = _inverted_residual_block(x, 64, (3, 3), t=6, alpha=alpha, strides=2, n=4)
    x = _inverted_residual_block(x, 96, (3, 3), t=6, alpha=alpha, strides=1, n=3)
    x = _inverted_residual_block(x, 160, (3, 3), t=6, alpha=alpha, strides=2, n=3)
    x = _inverted_residual_block(x, 320, (3, 3), t=6, alpha=alpha, strides=1, n=1)

    if alpha > 1.0:
        last_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_filters = 1280

    x = _conv_block(x, last_filters, (1, 1), strides=(1, 1))
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, last_filters))(x)
    x = Dropout(0.3, name='Dropout')(x)
    x = Conv2D(k, (1, 1), padding='same')(x)
    x = Activation('softmax', name='softmax')(x)
    output = Reshape((k,))(x)

    model = Model(inputs, output)
    # plot_model(model, to_file='images/MobileNetv2.png', show_shapes=True)

    return model
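As a usage sketch (not part of the original mobilenet_v2.py), the constructor above could be paired with plot_model as follows; the input shape, class count, and output path are illustrative, and the code assumes it runs inside the same module so the private helpers (_make_divisible, _conv_block, _inverted_residual_block) are available.

from keras.utils.vis_utils import plot_model

# Build a width-1.0 MobileNetV2 for 224x224 RGB inputs and 1000 classes,
# then render its layer graph, mirroring the commented-out call above.
model = MobileNetv2(input_shape=(224, 224, 3), k=1000, alpha=1.0)
model.summary()
plot_model(model, to_file='MobileNetv2.png', show_shapes=True)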