Python keras.losses.categorical_crossentropy() Examples
The following are 21 code examples of keras.losses.categorical_crossentropy(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out the other functions and classes available in the module keras.losses.
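
Before the project examples, here is a minimal, self-contained sketch of calling the loss directly (assuming a Keras 2.x install with the TensorFlow backend; the sample values are illustrative):

import numpy as np
from keras import backend as K
from keras.losses import categorical_crossentropy

# One one-hot target and a softmax-style prediction for a single sample.
y_true = K.constant(np.array([[0., 1., 0.]]))
y_pred = K.constant(np.array([[0.1, 0.8, 0.1]]))

# Returns one loss value per sample: here -log(0.8) ~= 0.223.
loss = categorical_crossentropy(y_true, y_pred)
print(K.eval(loss))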
Example #1
Source File: transfer_learning.py From hyperspectral_deeplearning_review with GNU General Public License v3.0

def get_model_compiled(args, inputshape, num_class):
    model = Sequential()
    if args.arch == "CNN1D":
        model.add(Conv1D(20, (24), activation='relu', input_shape=inputshape))
        model.add(MaxPooling1D(pool_size=5))
        model.add(Flatten())
        model.add(Dense(100))
    elif "CNN2D" in args.arch:
        model.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        model.add(Activation('relu'))
        model.add(Conv2D(100, (5, 5)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(100))
    elif args.arch == "CNN3D":
        model.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv3D(64, (5, 5, 16)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Flatten())
        model.add(Dense(300))
    if args.arch != "CNN2D":
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(args.lr1),
                  metrics=['accuracy'])
    return model
Example #2
Source File: crf_losses.py From keras-contrib with MIT License

def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.

    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.

    # Returns
        If the CRF layer is being trained in the join mode, returns the
        negative log-likelihood. Otherwise returns the categorical
        crossentropy implemented by the underlying Keras backend.

    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)
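
For context, this loss is normally passed to compile() alongside keras-contrib's CRF layer; a hedged sketch of that wiring (the vocabulary size, tag count, and layer choices below are illustrative, not from the source file):

from keras.models import Sequential
from keras.layers import Embedding
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy

model = Sequential()
model.add(Embedding(input_dim=5000, output_dim=64, mask_zero=True))
# learn_mode='join' makes crf_loss return the negative log-likelihood.
model.add(CRF(10, learn_mode='join'))
model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_accuracy])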
Example #3
Source File: keras_bert_layer.py From nlp_xiaojiang with MIT License

def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.

    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.

    # Returns
        If the CRF layer is being trained in the join mode, returns the
        negative log-likelihood. Otherwise returns the categorical
        crossentropy implemented by the underlying Keras backend.

    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)

# crf_marginal_accuracy, crf_viterbi_accuracy
Example #4
Source File: params.py From talos with MIT License

def iris():
    from keras.optimizers import Adam, Nadam
    from keras.losses import logcosh, categorical_crossentropy
    from keras.activations import relu, elu, softmax

    # here use a standard 2d dictionary for inputting the param boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2, 3, 4],
         'batch_size': (2, 30, 10),
         'epochs': [2],
         'dropout': (0, 0.5, 5),
         'weight_regulizer': [None],
         'emb_output_dims': [None],
         'shapes': ['brick', 'triangle', 0.2],
         'optimizer': [Adam, Nadam],
         'losses': [logcosh, categorical_crossentropy],
         'activation': [relu, elu],
         'last_activation': [softmax]}

    return p
Example #5
Source File: cnn3d.py From hyperspectral_deeplearning_review with GNU General Public License v3.0

def get_model_compiled(shapeinput, num_class, w_decay=0, lr=1e-3):
    clf = Sequential()
    clf.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=shapeinput))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Conv3D(64, (5, 5, 16)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(MaxPooling3D(pool_size=(2, 2, 1)))
    clf.add(Flatten())
    clf.add(Dense(300, kernel_regularizer=regularizers.l2(w_decay)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy,
                optimizer=Adam(lr=lr),
                metrics=['accuracy'])
    return clf
Example #6
Source File: models.py From Federated-Learning-Mini-Framework with MIT License

def create_model(input_shape: tuple, nb_classes: int,
                 init_with_imagenet: bool = False, learning_rate: float = 0.01):
    weights = None
    if init_with_imagenet:
        weights = "imagenet"

    model = VGG16(input_shape=input_shape,
                  classes=nb_classes,
                  weights=weights,
                  include_top=False)
    # "Shallow" VGG for Cifar10
    x = model.get_layer('block3_pool').output
    x = layers.Flatten(name='Flatten')(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dense(nb_classes)(x)
    x = layers.Softmax()(x)
    model = models.Model(model.input, x)

    loss = losses.categorical_crossentropy
    optimizer = optimizers.SGD(lr=learning_rate, decay=0.99)

    model.compile(optimizer, loss, metrics=["accuracy"])
    return model
Example #7
Source File: neuralnets.py From EmoPy with GNU Affero General Public License v3.0

def _init_model(self):
    """
    Initialize base model from Keras and add top layers to match the number
    of training emotion labels.
    :return:
    """
    base_model = self._get_base_model()

    top_layer_model = base_model.output
    top_layer_model = GlobalAveragePooling2D()(top_layer_model)
    top_layer_model = Dense(1024, activation='relu')(top_layer_model)
    prediction_layer = Dense(len(self.emotion_map.keys()), activation='softmax')(top_layer_model)

    # Keras 2 spelling; the original used the deprecated input=/output= and output_dim= arguments.
    model = Model(inputs=base_model.input, outputs=prediction_layer)
    print(model.summary())
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    self.model = model
Example #8
Source File: neuralnets.py From EmoPy with GNU Affero General Public License v3.0

def fit(self, features, labels, validation_split, epochs=50):
    """
    Trains the neural net on the data provided.

    :param features: Numpy array of training data.
    :param labels: Numpy array of target (label) data.
    :param validation_split: Float between 0 and 1. Percentage of training data to use for validation.
    :param epochs: Max number of times to train over dataset.
    """
    self.model.fit(x=features, y=labels, epochs=epochs, verbose=1,
                   callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)],
                   validation_split=validation_split, shuffle=True)

    for layer in self.model.layers[:self._NUM_BOTTOM_LAYERS_TO_RETRAIN]:
        layer.trainable = False
    for layer in self.model.layers[self._NUM_BOTTOM_LAYERS_TO_RETRAIN:]:
        layer.trainable = True

    self.model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
    self.model.fit(x=features, y=labels, epochs=50, verbose=1,
                   callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)],
                   validation_split=validation_split, shuffle=True)
Example #9
Source File: HandWritingRecognition.py From Jtyoui with MIT License

def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # One-hot encode the labels
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a user-defined constant initializer; constant(value=1.) === one()
    # Build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])
    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5')
Example #10
Source File: test_training.py From DeepLearning_Wavelet-LSTM with MIT License

def test_check_bad_shape():
    a = np.random.random((2, 3, 5))
    with pytest.raises(ValueError) as exc:
        _check_loss_and_target_compatibility(
            [a], [losses.categorical_crossentropy], [(2, 3, 6)])
    assert 'targets to have the same shape' in str(exc)
Example #11
Source File: one_hot_model.py From tying-wv-and-wc with MIT License

def perplexity(cls, y_true, y_pred):
    # Note: in Keras 1.x the backend signature is categorical_crossentropy(output, target),
    # which matches the (y_pred, y_true) order used here; Keras 2 reversed it to (target, output).
    cross_entropy = K.mean(K.categorical_crossentropy(y_pred, y_true), axis=-1)
    perplexity = K.exp(cross_entropy)
    return perplexity
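
As a sanity check on the definition (a hypothetical snippet, written with the Keras 2 backend argument order of (target, output)): a uniform prediction over 10 classes should give a perplexity of about 10, since exp(-log(1/10)) = 10.

import numpy as np
from keras import backend as K

y_true = K.constant(np.eye(10)[:1])          # a single one-hot target
y_pred = K.constant(np.full((1, 10), 0.1))   # a uniform prediction
ce = K.mean(K.categorical_crossentropy(y_true, y_pred))
print(K.eval(K.exp(ce)))                     # ~10.0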
Example #12
Source File: __init__.py From transformer-word-segmenter with Apache License 2.0

def label_smoothing_loss(y_true, y_pred):
    shape = K.int_shape(y_pred)
    n_class = shape[2]
    eps = 0.1
    y_true = y_true * (1 - eps) + eps / n_class
    return categorical_crossentropy(y_true, y_pred)
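
The smoothing step keeps the target a valid probability distribution; a small NumPy illustration of the arithmetic (eps and the class count are just example values):

import numpy as np

eps, n_class = 0.1, 5
y_true = np.array([0., 0., 1., 0., 0.])       # one-hot target
smoothed = y_true * (1 - eps) + eps / n_class
print(smoothed)        # [0.02 0.02 0.92 0.02 0.02]
print(smoothed.sum())  # 1.0 -- still sums to one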
Example #13
Source File: test_training.py From DeepLearning_Wavelet-LSTM with MIT License

def test_check_last_is_one():
    a = np.random.random((2, 3, 1))
    with pytest.raises(ValueError) as exc:
        _check_loss_and_target_compatibility(
            [a], [losses.categorical_crossentropy], [a.shape])
    assert 'You are passing a target array' in str(exc)
Example #14
Source File: __init__.py From transformer-word-segmenter with Apache License 2.0

def __build_model(self):
    assert self.max_depth >= 1, "The parameter max_depth must be at least 1"

    src_seq_input = Input(shape=(self.max_seq_len,), dtype="int32", name="src_seq_input")
    mask = Lambda(lambda x: padding_mask(x, x))(src_seq_input)

    emb_output = self.__input(src_seq_input)
    enc_output = self.__encoder(emb_output, mask)

    if self.use_crf:
        crf = CRF(self.tgt_vocab_size + 1, sparse_target=self.sparse_target)
        y_pred = crf(self.__output(enc_output))
    else:
        y_pred = self.__output(enc_output)

    model = Model(inputs=[src_seq_input], outputs=[y_pred])

    parallel_model = model
    if self.num_gpu > 1:
        parallel_model = multi_gpu_model(model, gpus=self.num_gpu)

    if self.use_crf:
        parallel_model.compile(self.optimizer, loss=crf_loss, metrics=[crf_accuracy])
    else:
        confidence_penalty = K.mean(
            self.confidence_penalty_weight * K.sum(y_pred * K.log(y_pred), axis=-1))
        model.add_loss(confidence_penalty)
        parallel_model.compile(optimizer=self.optimizer,
                               loss=categorical_crossentropy,
                               metrics=['accuracy'])

    return model, parallel_model
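
The non-CRF branch above adds a confidence penalty: K.sum(y_pred * K.log(y_pred)) is the negative entropy of the prediction, so the extra loss term discourages over-confident (low-entropy) outputs. A small NumPy check of the sign (the probability values are illustrative):

import numpy as np

p_confident = np.array([0.98, 0.01, 0.01])
p_uniform = np.full(3, 1 / 3)
neg_entropy = lambda p: np.sum(p * np.log(p))
print(neg_entropy(p_confident))  # ~ -0.112 (larger -> bigger added penalty)
print(neg_entropy(p_uniform))    # ~ -1.099 (smaller -> smaller added penalty)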
Example #15
Source File: neuralnets.py From EmoPy with GNU Affero General Public License v3.0

def fit_generator(self, generator, validation_data=None, epochs=50):
    # self.model.compile(optimizer="RMSProp", loss="cosine_proximity", metrics=["accuracy"])
    self.model.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),
                       loss=categorical_crossentropy, metrics=['accuracy'])
    self.model.fit_generator(generator=generator, validation_data=validation_data, epochs=epochs,
                             callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3), PlotLosses()])
Example #16
Source File: neuralnets.py From EmoPy with GNU Affero General Public License v3.0

def fit(self, image_data, labels, validation_split, epochs=50):
    """
    Trains the neural net on the data provided.

    :param image_data: Numpy array of training data.
    :param labels: Numpy array of target (label) data.
    :param validation_split: Float between 0 and 1. Percentage of training data to use for validation.
    :param epochs: Number of times to train over the input dataset.
    """
    self.model.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),
                       loss=categorical_crossentropy, metrics=['accuracy'])
    self.model.fit(image_data, labels, epochs=epochs, validation_split=validation_split,
                   callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)])
Example #17
Source File: model_pruning_example.py From Ridurre-Network-Filter-Pruning-Keras with MIT License

def compile_model(my_model):
    my_model.compile(optimizer=optimizers.Adam(lr=0.001),
                     loss=losses.categorical_crossentropy,
                     metrics=["accuracy"])
Example #18
Source File: classifier.py From BERT_with_keras with MIT License

def __init__(self, bert_config, pretrain_model_path, batch_size, seq_length, optimizer,
             num_classes, metrics=None, use_token_type=True, mask=True,
             max_predictions_per_seq=20, multi_gpu=None, loss=None):
    if not isinstance(bert_config, BertConfig):
        raise ValueError("`bert_config` must be an instance of `BertConfig`")
    if multi_gpu:
        # Bug fix: the original tested the bare function object, which is always truthy.
        if not tf.test.is_gpu_available():
            raise ValueError("GPU is not available.")

    self.config = bert_config
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.use_token_type = use_token_type
    self.max_predictions_per_seq = max_predictions_per_seq
    self.mask = mask
    self.num_classes = num_classes
    self.loss = loss or losses.categorical_crossentropy

    if multi_gpu:
        with tf.device('/cpu:0'):
            model = self._build_model(pretrain_model_path)
            model.compile(optimizer=optimizer, loss=self.loss, metrics=metrics)
        parallel_model = multi_gpu_model(model=model, gpus=multi_gpu)
        parallel_model.compile(optimizer=optimizer, loss=self.loss, metrics=metrics)
    else:
        model = self._build_model(pretrain_model_path)
        model.compile(optimizer=optimizer, loss=self.loss, metrics=metrics)

    self.estimator = model
    if multi_gpu:
        self.estimator = parallel_model
Example #19
Source File: test_training.py From DeepLearning_Wavelet-LSTM with MIT License

def test_weighted_masked_objective():
    a = Input(shape=(3,), name='input_a')

    # weighted_masked_objective
    def mask_dummy(y_true=None, y_pred=None, weight=None):
        return K.placeholder(y_true.shape)

    weighted_function = _weighted_masked_objective(losses.categorical_crossentropy)
    weighted_function(a, a, None)
Example #20
Source File: one_hot_model.py From tying-wv-and-wc with MIT License

def compile(self):
    self.model.compile(
        loss=losses.categorical_crossentropy,
        optimizer=LangModelSGD(self.setting),
        metrics=["accuracy", self.perplexity]
    )
Example #21
Source File: test_training.py From DeepLearning_Wavelet-LSTM with MIT License

def test_check_not_failing():
    a = np.random.random((2, 1, 3))
    _check_loss_and_target_compatibility(
        [a], [losses.categorical_crossentropy], [a.shape])
    _check_loss_and_target_compatibility(
        [a], [losses.categorical_crossentropy], [(2, None, 3)])