Python keras.optimizers.Optimizer() Examples
The following are 7 code examples of keras.optimizers.Optimizer(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the keras.optimizers module.
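A recurring pattern across the examples below is to accept either an optimizer name (a string such as 'adam') or a keras.optimizers.Optimizer instance, validate it, and normalize it. As a minimal, self-contained sketch of that pattern (written for this page, not taken from any of the projects below):

from keras.optimizers import Optimizer, get, serialize

def normalize_optimizer(optimizer):
    # Accept either a string identifier or an Optimizer instance.
    assert isinstance(optimizer, (str, Optimizer))
    opt = get(optimizer)   # resolves a string name to an Optimizer; instances pass through
    return serialize(opt)  # a {'class_name': ..., 'config': ...} dict, as stored in Example #1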
Example #1
Source File: workers.py From dist-keras with GNU General Public License v3.0
def __init__(self, model, optimizer, loss, loss_weights, metrics=["accuracy"],
             features_col="features", label_col="label", batch_size=32,
             num_epoch=1, learning_rate=1.0):
    assert isinstance(optimizer, (str, Optimizer)), "'optimizer' must be a string or a Keras Optimizer instance"
    assert isinstance(features_col, (str, list)), "'features_col' must be a string or a list of strings"
    assert isinstance(label_col, (str, list)), "'label_col' must be a string or a list of strings"
    self.model = model
    self.optimizer = {'class_name': optimizer, 'config': {}} if isinstance(optimizer, str) else serialize(optimizer)
    self.loss = loss
    self.loss_weights = loss_weights
    self.metrics = metrics
    self.features_column = [features_col] if isinstance(features_col, str) else features_col
    self.label_column = [label_col] if isinstance(label_col, str) else label_col
    self.batch_size = batch_size
    self.num_epoch = num_epoch
    self.max_mini_batches = 100
    self.prefetching_thread = None
    self.mini_batches = None
    self.is_prefetching = True
    self.worker_id = -1
    self.learning_rate = learning_rate
    self.num_inputs = len(self.features_column)
    self.num_outputs = len(self.label_column)
    self.current_epoch = 0
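The worker above stores the optimizer as a serialized config dictionary rather than as a live object, which makes it straightforward to ship to Spark workers. A hedged sketch of how such a dict can be turned back into an optimizer with the standard Keras helper (the dict contents here are illustrative):

from keras.optimizers import Optimizer, deserialize

stored = {'class_name': 'Adam', 'config': {}}  # what a string argument is normalized to above
optimizer = deserialize(stored)                # rebuild a live Optimizer instance
assert isinstance(optimizer, Optimizer)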
Example #2
Source File: train.py From CSBDeep with BSD 3-Clause "New" or "Revised" License
def prepare_model(model, optimizer, loss, metrics=('mse', 'mae'),
                  loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):
    """ TODO """
    from keras.optimizers import Optimizer
    isinstance(optimizer, Optimizer) or _raise(ValueError())

    loss_standard = eval('loss_%s()' % loss)
    _metrics = [eval('loss_%s()' % m) for m in metrics]
    callbacks = [TerminateOnNaN()]

    # checks
    assert 0 <= loss_bg_thresh <= 1
    assert loss_bg_thresh == 0 or Y is not None
    if loss == 'laplace':
        assert K.image_data_format() == "channels_last", "TODO"
        assert model.output.shape.as_list()[-1] >= 2 and model.output.shape.as_list()[-1] % 2 == 0

    # loss
    if loss_bg_thresh == 0:
        _loss = loss_standard
    else:
        freq = np.mean(Y > loss_bg_thresh)
        # print("class frequency:", freq)
        alpha = K.variable(1.0)
        loss_per_pixel = eval('loss_{loss}(mean=False)'.format(loss=loss))
        _loss = loss_thresh_weighted_decay(loss_per_pixel, loss_bg_thresh,
                                           0.5 / (0.1 + (1 - freq)),
                                           0.5 / (0.1 + freq),
                                           alpha)
        callbacks.append(ParameterDecayCallback(alpha, loss_bg_decay, name='alpha'))

    if loss not in metrics:
        _metrics.append(loss_standard)

    # compile model
    model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)

    return callbacks
Example #3
Source File: _multigpu_with_nccl.py From keras_experiments with The Unlicense
def compile(self, *args, **kwargs):
    '''Refer to Model.compile docstring for parameters. Override
    functionality is documented below.

    :override compile: Override Model.compile method to check for options
        that the optimizer is multi-gpu enabled, and synchronize initial
        variables.
    '''
    initsync = self._initsync
    usenccl = self._usenccl

    opt = kwargs['optimizer']
    # if isinstance(opt, str):
    if not isinstance(opt, KO.Optimizer):
        opt = KO.get(opt)
        kwargs['optimizer'] = opt

    if self._syncopt and not getattr(opt, 'ismgpu', False):
        raise RuntimeError(
            'Multi-GPU synchronization model requires a multi-GPU '
            'optimizer. Instead got: {}'.format(opt))

    opt.usenccl = usenccl

    if self._enqueue_ops:
        # Produces a warning that kwargs are ignored for Tensorflow. Patch
        # Function in tensorflow_backend to use the enqueue_ops option.
        kwargs['fetches'] = self._enqueue_ops

    super(ModelMGPU, self).compile(*args, **kwargs)

    if initsync:
        self._run_initsync()
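For context, KO.get above is keras.optimizers.get, which resolves a string identifier to an Optimizer instance and returns existing Optimizer instances unchanged. A small illustrative snippet (not from keras_experiments itself):

import keras.optimizers as KO

opt = KO.get('sgd')    # string identifier -> keras.optimizers.SGD instance
assert isinstance(opt, KO.Optimizer)
same = KO.get(opt)     # an existing Optimizer instance is passed through (Keras 2.x behavior)
assert isinstance(same, KO.Optimizer)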
Example #4
Source File: test_assembler.py From entity_embeddings_categorical with MIT License
def check_model_parameters(self, model: Model, optimizer: Optimizer, loss: str, metrics) -> None:
    self.assertIsInstance(model.optimizer, optimizer)
    self.assertEqual(model.loss, loss)
    self.assertEqual(model.metrics, metrics)
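The helper above is presumably invoked from a unittest.TestCase after a model has been assembled and compiled. A hedged sketch of the kind of model state it checks (the model construction here is an assumption for illustration, not code from entity_embeddings_categorical):

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer=Adam(), loss='mse', metrics=['accuracy'])
# After compile(), model.optimizer is an Adam instance and model.loss is 'mse',
# which is what check_model_parameters asserts via assertIsInstance/assertEqual.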
Example #5
Source File: net.py From speechless with MIT License
def __init__(self,
             input_size_per_time_step: int,
             allowed_characters: List[chr],
             use_raw_wave_input: bool = False,
             activation: str = "relu",
             output_activation: str = "softmax",
             optimizer: Optimizer = Adam(1e-4),
             dropout: Optional[float] = None,
             load_model_from_directory: Optional[Path] = None,
             load_epoch: Optional[int] = None,
             allowed_characters_for_loaded_model: Optional[List[chr]] = None,
             frozen_layer_count: int = 0,
             reinitialize_trainable_loaded_layers: bool = False,
             use_asg: bool = False,
             asg_transition_probabilities: Optional[ndarray] = None,
             asg_initial_probabilities: Optional[ndarray] = None,
             kenlm_directory: Path = None):

    if frozen_layer_count > 0 and load_model_from_directory is None:
        raise ValueError("Layers cannot be frozen if model is trained from scratch.")

    self.kenlm_directory = kenlm_directory
    self.grapheme_encoding = AsgGraphemeEncoding(allowed_characters=allowed_characters) \
        if use_asg else CtcGraphemeEncoding(allowed_characters=allowed_characters)

    self.asg_transition_probabilities = self._default_asg_transition_probabilities(
        self.grapheme_encoding.grapheme_set_size) \
        if asg_transition_probabilities is None else asg_transition_probabilities

    self.asg_initial_probabilities = self._default_asg_initial_probabilities(
        self.grapheme_encoding.grapheme_set_size) \
        if asg_initial_probabilities is None else asg_initial_probabilities

    self.use_asg = use_asg
    self.frozen_layer_count = frozen_layer_count
    self.output_activation = output_activation
    self.activation = activation
    self.use_raw_wave_input = use_raw_wave_input
    self.input_size_per_time_step = input_size_per_time_step
    self.optimizer = optimizer
    self.load_epoch = load_epoch
    self.dropout = dropout
    self.predictive_net = self.create_predictive_net()
    self.prediction_phase_flag = 0.

    if self.kenlm_directory is not None:
        expected_characters = list(
            single(read_text(self.kenlm_directory / "vocabulary", encoding='utf8').splitlines()).lower())

        if allowed_characters != expected_characters:
            raise ValueError("Allowed characters {} differ from those expected by kenlm decoder: {}".
                             format(allowed_characters, expected_characters))

    if load_model_from_directory is not None:
        self.load_weights(allowed_characters_for_loaded_model, load_epoch, load_model_from_directory,
                          loaded_first_layers_count=frozen_layer_count
                          if reinitialize_trainable_loaded_layers else None)
Example #6
Source File: model_inceptionresnet.py From cvpr-2018-autonomous-driving-autopilot-solution with MIT License
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object
    # optimizer = keras.optimizers.SGD(
    #     lr=learning_rate, momentum=momentum,
    #     clipnorm=self.config.GRADIENT_CLIP_NORM)
    if self.config.OPTIMIZER == 'Adam':
        optimizer = keras.optimizers.Adam(lr=learning_rate, epsilon=self.config.EPSILON)
    elif self.config.OPTIMIZER == 'SGD':
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
    else:
        optimizer = SGDAccum(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM,
            accum_iters=self.config.ACCUM_ITERS)

    # Add Losses
    # First, clear previously set losses to avoid duplication
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = [
        "rpn_class_loss", "rpn_bbox_loss",
        "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output in self.keras_model.losses:
            continue
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.add_loss(loss)

    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    reg_losses = [
        keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
        for w in self.keras_model.trainable_weights
        if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))

    # Compile
    self.keras_model.compile(
        optimizer=optimizer,
        loss=[None] * len(self.keras_model.outputs))

    # Add metrics for losses
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.metrics_tensors.append(loss)
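Note that the example above targets the Keras 2.x API, where optimizer constructors take lr. In more recent Keras/tf.keras releases the argument is named learning_rate; roughly equivalent constructions would look like the following (a sketch with illustrative values in place of the config attributes):

from tensorflow import keras

optimizer = keras.optimizers.Adam(learning_rate=1e-3, epsilon=1e-7)
# or, mirroring the SGD branch above, with gradient clipping:
optimizer = keras.optimizers.SGD(learning_rate=1e-3, momentum=0.9, clipnorm=5.0)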
Example #7
Source File: model.py From cvpr-2018-autonomous-driving-autopilot-solution with MIT License
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object
    # optimizer = keras.optimizers.SGD(
    #     lr=learning_rate, momentum=momentum,
    #     clipnorm=self.config.GRADIENT_CLIP_NORM)
    if self.config.OPTIMIZER == 'Adam':
        optimizer = keras.optimizers.Adam(lr=learning_rate, epsilon=self.config.EPSILON)
    elif self.config.OPTIMIZER == 'SGD':
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
    else:
        optimizer = SGDAccum(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM,
            accum_iters=self.config.ACCUM_ITERS)

    # Add Losses
    # First, clear previously set losses to avoid duplication
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = [
        "rpn_class_loss", "rpn_bbox_loss",
        "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output in self.keras_model.losses:
            continue
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.add_loss(loss)

    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    reg_losses = [
        keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
        for w in self.keras_model.trainable_weights
        if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))

    # Compile
    self.keras_model.compile(
        optimizer=optimizer,
        loss=[None] * len(self.keras_model.outputs))

    # Add metrics for losses
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.metrics_tensors.append(loss)