Python tensorflow.keras.backend.clear_session() Examples
The following are 23 code examples of tensorflow.keras.backend.clear_session().
They are drawn from open-source projects; each example notes the project, source file, and license it comes from. You may also want to check out the other functions and classes available in the tensorflow.keras.backend module.
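Before the examples themselves, here is a minimal sketch of the usage pattern most of them share: calling clear_session() each time a fresh model is built inside one Python process, so that graph state and auto-generated layer names from an earlier build do not leak into the next one. The network, data, and loop over hidden-layer sizes below are made up purely for illustration and are not taken from any of the projects listed.

import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

def build_and_train(hidden_units):
    # Reset global Keras state so repeated model builds in the same process
    # do not accumulate graph nodes or clash over auto-generated layer names.
    K.clear_session()
    model = Sequential([
        Dense(hidden_units, activation='relu', input_shape=(8,)),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy')
    x = np.random.rand(64, 8)
    y = np.random.randint(0, 2, size=(64, 1))
    model.fit(x, y, epochs=1, verbose=0)
    return model

# Each candidate size starts from a clean session, much like the tuning and
# test-teardown examples below.
for units in (16, 32, 64):
    build_and_train(units)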
Example #1
Source File: model.py From keras-YOLOv3-model-set with MIT License
def get_yolo2_inference_model(model_type, anchors, num_classes, weights_path=None, input_shape=None, confidence=0.1):
    '''create the inference model, for YOLOv2'''
    #K.clear_session() # get a new session
    num_anchors = len(anchors)

    image_shape = Input(shape=(2,), dtype='int64', name='image_shape')

    model_body, _ = get_yolo2_model(model_type, num_anchors, num_classes, input_shape=input_shape)
    print('Create YOLOv2 {} model with {} anchors and {} classes.'.format(model_type, num_anchors, num_classes))

    if weights_path:
        model_body.load_weights(weights_path, by_name=False)#, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))

    boxes, scores, classes = Lambda(batched_yolo2_postprocess, name='yolo2_postprocess',
            arguments={'anchors': anchors, 'num_classes': num_classes, 'confidence': confidence})(
        [model_body.output, image_shape])

    model = Model([model_body.input, image_shape], [boxes, scores, classes])

    return model
Example #2
Source File: cnn_cifar_optuna_affinity.py From affinity-loss with MIT License
def train(lambd, sigma, n_centers, trial):
    K.clear_session()
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(200)

    model = create_models(sigma, n_centers)
    model.compile("adam", affinity_loss(lambd), [acc])
    tf.logging.set_verbosity(tf.logging.FATAL)  # keep the log from being flooded

    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test, trial)

    history = model.fit(X_train, y_train, callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_f1 = max(f1.f1_log)
    print(f"lambda:{lambd:.04}, sigma:{sigma:.04} n_centers:{n_centers} / f1 = {max_f1:.04}")
    return max_f1
Example #3
Source File: infer.py From stacks-usecase with Apache License 2.0
def infer(img):
    """inference function, accepts an abstract image file, returns generated image"""
    home_dir = get_directory()
    # load model
    backend.clear_session()
    gen_model = load_model(home_dir + "/models/generator_model.h5", compile=False)
    img = np.array(Image.open(img))
    img = norm_data([img])
    s_time = time.time()
    result = gen_model.predict(img[0].reshape(1, 256, 256, 3))
    f_time = time.time()
    logger.info(
        "\033[92m"
        + "[INFO] "
        + "\033[0m"
        + "Inference done in: {:2.3f} seconds".format(f_time - s_time)
    )
    # transform result from normalized to absolute values and convert to image object
    result = Image.fromarray(reverse_norm(result[0]), "RGB")
    # for debugging, uncomment the line below to inspect the generated image locally
    # result.save("generted_img.jpg", "JPEG")
    # convert image to bytes object to send it to the client
    binary_buffer = io.BytesIO()
    result.save(binary_buffer, format="JPEG")
    return b2a_base64(binary_buffer.getvalue())
Example #4
Source File: display_activations_test.py From keract with MIT License
def setUp(self) -> None:
    K.clear_session()
Example #5
Source File: model.py From keras-YOLOv3-model-set with MIT License
def get_yolo2_train_model(model_type, anchors, num_classes, weights_path=None, freeze_level=1, optimizer=Adam(lr=1e-3, decay=1e-6), label_smoothing=0, model_pruning=False, pruning_end_step=10000):
    '''create the training model, for YOLOv2'''
    #K.clear_session() # get a new session
    num_anchors = len(anchors)

    # y_true in form of relative x, y, w, h, objectness, class
    y_true_input = Input(shape=(None, None, num_anchors, 6))

    model_body, backbone_len = get_yolo2_model(model_type, num_anchors, num_classes, model_pruning=model_pruning, pruning_end_step=pruning_end_step)
    print('Create YOLOv2 {} model with {} anchors and {} classes.'.format(model_type, num_anchors, num_classes))
    print('model layer number:', len(model_body.layers))

    if weights_path:
        model_body.load_weights(weights_path, by_name=True)#, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))

    if freeze_level in [1, 2]:
        # Freeze the backbone part or freeze all but final feature map & input layers.
        num = (backbone_len, len(model_body.layers)-2)[freeze_level-1]
        for i in range(num):
            model_body.layers[i].trainable = False
        print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    elif freeze_level == 0:
        # Unfreeze all layers.
        for i in range(len(model_body.layers)):
            model_body.layers[i].trainable = True
        print('Unfreeze all of the layers.')

    model_loss, location_loss, confidence_loss, class_loss = Lambda(yolo2_loss, name='yolo_loss',
            arguments={'anchors': anchors, 'num_classes': num_classes, 'label_smoothing': label_smoothing})(
        [model_body.output, y_true_input])

    model = Model([model_body.input, y_true_input], model_loss)

    loss_dict = {'location_loss':location_loss, 'confidence_loss':confidence_loss, 'class_loss':class_loss}
    add_metrics(model, loss_dict)

    model.compile(optimizer=optimizer, loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    return model
Example #6
Source File: tf_backend.py From DeepPavlov with Apache License 2.0
def __call__(cls, *args, **kwargs):
    obj = cls.__new__(cls, *args, **kwargs)

    from .keras_model import KerasModel
    if issubclass(cls, KerasModel):
        from tensorflow.keras import backend as K
        if K.backend() != 'tensorflow':
            obj.__init__(*args, **kwargs)
            return obj
        K.clear_session()
        obj.graph = tf.Graph()
        with obj.graph.as_default():
            if hasattr(cls, '_config_session'):
                obj.sess = cls._config_session()
            else:
                obj.sess = tf.Session()
    else:
        obj.graph = tf.Graph()

    for meth in dir(obj):
        if meth == '__class__':
            continue
        attr = getattr(obj, meth)
        if callable(attr):
            if issubclass(cls, KerasModel):
                wrapped_attr = _keras_wrap(attr, obj.sess)
            else:
                wrapped_attr = _graph_wrap(attr, obj.graph)
            setattr(obj, meth, wrapped_attr)
    obj.__init__(*args, **kwargs)
    return obj
Example #7
Source File: persist_load_test.py From keract with MIT License
def setUp(self) -> None:
    K.clear_session()
Example #8
Source File: get_activations_test.py From keract with MIT License
def tearDown(self) -> None:
    K.clear_session()
Example #9
Source File: get_activations_test.py From keract with MIT License
def setUp(self) -> None:
    K.clear_session()
Example #10
Source File: display_activations_test.py From keract with MIT License
def tearDown(self) -> None:
    K.clear_session()
    for image in glob('*.png'):
        os.remove(image)
Example #11
Source File: ReadAnalogNeedleClass.py From water-meter-system-complete with MIT License
def ReadoutSingleImage(self, image):
    test_image = image.resize((32, 32), Image.NEAREST)
    test_image.save('./image_tmp/resize.jpg', "JPEG")
    test_image = np.array(test_image, dtype="float32")
    img = np.reshape(test_image, [1, 32, 32, 3])
    classes = self.model.predict(img)
    out_sin = classes[0][0]
    out_cos = classes[0][1]
    K.clear_session()
    result = np.arctan2(out_sin, out_cos) / (2 * math.pi) % 1
    result = result * 10
    return result
Example #12
Source File: conftest.py From ivis with GNU General Public License v2.0
def clear_session_after_test():
    """Test wrapper to clean up after TensorFlow and CNTK tests.

    This wrapper runs for all the tests in the keras test suite.
    """
    yield
    if K.backend() == 'tensorflow' or K.backend() == 'cntk':
        K.clear_session()
Example #13
Source File: test_keras_model_io.py From kryptoflow with GNU General Public License v3.0
def test_trainable_model_from_file(keras_model, project_manager):
    skl = KerasModel(artifact=keras_model)
    skl.store(name='nn')
    K.clear_session()
    trainable = TrainableModel.from_file(run_number=1, name='nn', model_type='keras')
    assert isinstance(trainable.model, KerasBaseModel)

    for root, dirs, files in os.walk(project_manager.CONFIG['saved-models']):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))

    with open(os.path.join(project_manager.CONFIG['saved-models'], '.gitkeep'), 'w') as gitkeep:
        gitkeep.write('empty')
Example #14
Source File: test_keras_model_io.py From kryptoflow with GNU General Public License v3.0
def test_loader(keras_model, project_manager):
    skl = KerasModel(artifact=keras_model)
    skl.store(name='nn')
    K.clear_session()
    reloaded = skl.load(name='nn')
    assert isinstance(reloaded, KerasBaseModel)

    for root, dirs, files in os.walk(project_manager.CONFIG['saved-models']):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))

    with open(os.path.join(project_manager.CONFIG['saved-models'], '.gitkeep'), 'w') as gitkeep:
        gitkeep.write('empty')
Example #15
Source File: test_keras_model_io.py From kryptoflow with GNU General Public License v3.0
def keras_test(func):
    """Function wrapper to clean up after TensorFlow tests.

    # Arguments
        func: test function to clean up after.

    # Returns
        A function wrapping the input function.
    """
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        output = func(*args, **kwargs)
        K.clear_session()
        return output
    return wrapper
Example #16
Source File: private_model_test.py From tf-encrypted with Apache License 2.0
def tearDown(self):
    K.clear_session()
Example #17
Source File: private_model_test.py From tf-encrypted with Apache License 2.0
def setUp(self):
    K.clear_session()
Example #18
Source File: convert_test.py From tf-encrypted with Apache License 2.0
def tearDown(self):
    global _GLOBAL_FILENAME

    tf.reset_default_graph()
    K.clear_session()

    logging.debug("Cleaning file: %s", _GLOBAL_FILENAME)
    os.remove(_GLOBAL_FILENAME)

    logging.getLogger().setLevel(self.previous_logging_level)
Example #19
Source File: ReadDigitalDigitClass.py From water-meter-system-complete with MIT License
def ReadoutSingleImage(self, image):
    test_image = image.resize((20, 32), Image.NEAREST)
    test_image.save('./image_tmp/resize.jpg', "JPEG")
    test_image = np.array(test_image, dtype="float32")
    img = np.reshape(test_image, [1, 32, 20, 3])
    result = self.model.predict_classes(img)
    K.clear_session()
    if result == 10:
        result = "NaN"
    else:
        result = result[0]
    return result
Example #20
Source File: asr_solver.py From delta with Apache License 2.0
def train_and_eval(self):
    ''' train and eval '''
    # data must be init before model build
    #backend_sess = K.get_session()
    train_ds, train_task = self.input_data(mode=utils.TRAIN)
    #train_gen = self.input_generator(tf.data.make_one_shot_iterator(train_ds), train_task, backend_sess, mode=utils.TRAIN)
    eval_ds, eval_task = self.input_data(mode=utils.EVAL)
    #eval_gen = self.input_generator(tf.data.make_one_shot_iterator(eval_ds), eval_task, backend_sess, mode=utils.EVAL)

    self.model_fn(mode=utils.TRAIN)
    assert self._built

    callbacks = self.get_callbacks(
        eval_ds, eval_task, monitor_used=self._monitor_used)

    try:
        # Run training
        self.active_model.fit_generator(
            train_task,
            steps_per_epoch=len(train_task),
            epochs=self._num_epochs,
            verbose=1,
            callbacks=callbacks,
            validation_data=eval_task,
            validation_steps=len(eval_task),
            validation_freq=1,
            class_weight=None,
            max_queue_size=100,
            workers=4,
            use_multiprocessing=False,
            shuffle=True,
            initial_epoch=self._init_epoch)
        # save model
        # not work for subclassed model, using tf.keras.experimental.export_saved_model
        #self.save_model()
    except (Exception, ArithmeticError) as err:  #pylint: disable=broad-except
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(err).__name__, err.args)
        logging.error(message)
        raise err
    finally:
        # Clear memory
        K.clear_session()
        logging.info("Ending time: {}".format(
            datetime.now().strftime('%Y-%m-%d %H:%M:%S')))

#pylint: disable=unused-argument,too-many-locals
Example #21
Source File: model.py From keras-YOLOv3-model-set with MIT License
def get_yolo3_train_model(model_type, anchors, num_classes, weights_path=None, freeze_level=1, optimizer=Adam(lr=1e-3, decay=0), label_smoothing=0, model_pruning=False, pruning_end_step=10000):
    '''create the training model, for YOLOv3'''
    #K.clear_session() # get a new session
    num_anchors = len(anchors)
    #YOLOv3 model has 9 anchors and 3 feature layers but
    #Tiny YOLOv3 model has 6 anchors and 2 feature layers,
    #so we can calculate feature layers number to get model type
    num_feature_layers = num_anchors//3

    #feature map target value, so its shape should be like:
    # [
    #  (image_height/32, image_width/32, 3, num_classes+5),
    #  (image_height/16, image_width/16, 3, num_classes+5),
    #  (image_height/8, image_width/8, 3, num_classes+5)
    # ]
    y_true = [Input(shape=(None, None, 3, num_classes+5), name='y_true_{}'.format(l)) for l in range(num_feature_layers)]

    model_body, backbone_len = get_yolo3_model(model_type, num_feature_layers, num_anchors, num_classes, model_pruning=model_pruning, pruning_end_step=pruning_end_step)
    print('Create {} {} model with {} anchors and {} classes.'.format('Tiny' if num_feature_layers==2 else '', model_type, num_anchors, num_classes))
    print('model layer number:', len(model_body.layers))

    if weights_path:
        model_body.load_weights(weights_path, by_name=True)#, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))

    if freeze_level in [1, 2]:
        # Freeze the backbone part or freeze all but final feature map & input layers.
        num = (backbone_len, len(model_body.layers)-3)[freeze_level-1]
        for i in range(num):
            model_body.layers[i].trainable = False
        print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    elif freeze_level == 0:
        # Unfreeze all layers.
        for i in range(len(model_body.layers)):
            model_body.layers[i].trainable = True
        print('Unfreeze all of the layers.')

    model_loss, location_loss, confidence_loss, class_loss = Lambda(yolo3_loss, name='yolo_loss',
            arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5, 'label_smoothing': label_smoothing})(
        [*model_body.output, *y_true])

    model = Model([model_body.input, *y_true], model_loss)

    loss_dict = {'location_loss':location_loss, 'confidence_loss':confidence_loss, 'class_loss':class_loss}
    add_metrics(model, loss_dict)

    model.compile(optimizer=optimizer, loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    return model
Example #22
Source File: model.py From keras-YOLOv3-model-set with MIT License
def get_yolo3_inference_model(model_type, anchors, num_classes, weights_path=None, input_shape=None, confidence=0.1):
    '''create the inference model, for YOLOv3'''
    #K.clear_session() # get a new session
    num_anchors = len(anchors)
    #YOLOv3 model has 9 anchors and 3 feature layers but
    #Tiny YOLOv3 model has 6 anchors and 2 feature layers,
    #so we can calculate feature layers number to get model type
    num_feature_layers = num_anchors//3

    image_shape = Input(shape=(2,), dtype='int64', name='image_shape')

    model_body, _ = get_yolo3_model(model_type, num_feature_layers, num_anchors, num_classes, input_shape=input_shape)
    print('Create {} YOLOv3 {} model with {} anchors and {} classes.'.format('Tiny' if num_feature_layers==2 else '', model_type, num_anchors, num_classes))

    if weights_path:
        model_body.load_weights(weights_path, by_name=False)#, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))

    boxes, scores, classes = Lambda(batched_yolo3_postprocess, name='yolo3_postprocess',
            arguments={'anchors': anchors, 'num_classes': num_classes, 'confidence': confidence})(
        [*model_body.output, image_shape])

    model = Model([model_body.input, image_shape], [boxes, scores, classes])

    return model


#def get_yolo3_prenms_model(model_type, anchors, num_classes, weights_path=None, input_shape=None):
    #'''create the prenms model, for YOLOv3'''
    ##K.clear_session() # get a new session
    #num_anchors = len(anchors)
    ##YOLOv3 model has 9 anchors and 3 feature layers but
    ##Tiny YOLOv3 model has 6 anchors and 2 feature layers,
    ##so we can calculate feature layers number to get model type
    #num_feature_layers = num_anchors//3

    #image_shape = Input(shape=(2,), dtype='int64', name='image_shape')

    #model_body, _ = get_yolo3_model(model_type, num_feature_layers, num_anchors, num_classes, input_shape=input_shape)
    #print('Create {} YOLOv3 {} model with {} anchors and {} classes.'.format('Tiny' if num_feature_layers==2 else '', model_type, num_anchors, num_classes))

    #if weights_path:
        #model_body.load_weights(weights_path, by_name=False)#, skip_mismatch=True)
        #print('Load weights {}.'.format(weights_path))

    #boxes, box_scores = Lambda(batched_yolo3_prenms, name='yolo3_prenms',
            #arguments={'anchors': anchors, 'num_classes': num_classes, 'input_shape': input_shape[:2]})(
        #[*model_body.output, image_shape])
    ##boxes, box_scores = Yolo3PostProcessLayer(anchors, num_classes, input_dim=input_shape[:2], name='yolo3_prenms')([model_body.output, image_shape])

    #model = Model([model_body.input, image_shape], [boxes, box_scores])

    #return model
Example #23
Source File: qconvolutional_test.py From qkeras with Apache License 2.0
def test_qconv1d():
    np.random.seed(33)
    x = Input((4, 4,))
    y = QConv1D(
        2, 1,
        kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
        bias_quantizer=quantized_bits(4, 0, 1),
        name='qconv1d')(
            x)
    model = Model(inputs=x, outputs=y)

    # Extract model operations
    model_ops = extract_model_operations(model)

    # Assertion about the number of operations for this Conv1D layer
    assert model_ops['qconv1d']['number_of_operations'] == 32

    # Print qstats to make sure it works with Conv1D layer
    print_qstats(model)

    # reload the model to ensure saving/loading works
    # json_string = model.to_json()
    # clear_session()
    # model = quantized_model_from_json(json_string)

    for layer in model.layers:
        all_weights = []
        for i, weights in enumerate(layer.get_weights()):
            input_size = np.prod(layer.input.shape.as_list()[1:])
            if input_size is None:
                input_size = 10 * 10
            shape = weights.shape
            assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
            all_weights.append(
                10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
        if all_weights:
            layer.set_weights(all_weights)

    # Save the model as an h5 file using Keras's model.save()
    fd, fname = tempfile.mkstemp('.h5')
    model.save(fname)
    del model  # Delete the existing model

    # Return a compiled model identical to the previous one
    model = load_qmodel(fname)

    # Clean the created h5 file after loading the model
    os.close(fd)
    os.remove(fname)

    # apply quantizer to weights
    model_save_quantized_weights(model)

    inputs = np.random.rand(2, 4, 4)
    p = model.predict(inputs).astype(np.float16)

    y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],
                   [-1.659, 0.9834]],
                  [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],
                   [-2.652, -0.467]]]).astype(np.float16)

    assert np.all(p == y)