Python tensorflow.python.keras.backend.floatx() Examples
The following are 13 code examples of tensorflow.python.keras.backend.floatx(), drawn from open-source projects.
You may also want to check out the other available functions and classes of the module tensorflow.python.keras.backend.
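As a quick orientation before the examples: floatx() returns the name of the Keras default float type as a string ('float32' unless it has been changed), and set_floatx() changes it globally. Note that tensorflow.python.keras.backend is a private module path; recent TensorFlow releases expose the same functions publicly as tf.keras.backend. A minimal sketch:

from tensorflow.python.keras import backend as K

print(K.floatx())        # 'float32' by default

K.set_floatx('float64')  # change the global default float type
print(K.floatx())        # 'float64'

K.set_floatx('float32')  # restore the default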
Example #1
Source File: Generate.py From YouTubeCommenter with MIT License
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

def new_sparse_categorical_accuracy(y_true, y_pred):
    y_pred_rank = ops.convert_to_tensor(y_pred).get_shape().ndims
    y_true_rank = ops.convert_to_tensor(y_true).get_shape().ndims
    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    if ((y_true_rank is not None) and (y_pred_rank is not None)
            and (len(K.int_shape(y_true)) == len(K.int_shape(y_pred)))):
        y_true = array_ops.squeeze(y_true, [-1])
    y_pred = math_ops.argmax(y_pred, axis=-1)
    # If the predicted output and actual output types don't match,
    # force cast them to match.
    if K.dtype(y_pred) != K.dtype(y_true):
        y_pred = math_ops.cast(y_pred, K.dtype(y_true))
    return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())

# Load the model
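A hypothetical usage sketch (the model below is a placeholder, not from the project): the function above is a drop-in replacement for Keras's built-in sparse_categorical_accuracy, so it can be passed directly to compile():

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='softmax', input_shape=(20,)),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=[new_sparse_categorical_accuracy])  # defined above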
Example #2
Source File: Train.py From YouTubeCommenter with MIT License
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

def new_sparse_categorical_accuracy(y_true, y_pred):
    y_pred_rank = ops.convert_to_tensor(y_pred).get_shape().ndims
    y_true_rank = ops.convert_to_tensor(y_true).get_shape().ndims
    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    if ((y_true_rank is not None) and (y_pred_rank is not None)
            and (len(K.int_shape(y_true)) == len(K.int_shape(y_pred)))):
        y_true = array_ops.squeeze(y_true, [-1])
    y_pred = math_ops.argmax(y_pred, axis=-1)
    # If the predicted output and actual output types don't match,
    # force cast them to match.
    if K.dtype(y_pred) != K.dtype(y_true):
        y_pred = math_ops.cast(y_pred, K.dtype(y_true))
    return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())

# Build the training models
Example #3
Source File: helpers.py From tacotron2 with BSD 3-Clause "New" or "Revised" License
def __init__(self, batch_size, output_dim, r, n_feed_frame=1, dtype=None):
    assert n_feed_frame <= r
    self._batch_size = batch_size
    self._output_dim = output_dim
    self._end_token = tf.tile([0.0], [output_dim * r])
    self.n_feed_frame = n_feed_frame
    self._dtype = dtype or backend.floatx()
Example #4
Source File: helpers.py From tacotron2 with BSD 3-Clause "New" or "Revised" License
def __init__(self, batch_size, output_dim, r, n_feed_frame=1, min_iters=10, dtype=None):
    assert n_feed_frame <= r
    self._batch_size = batch_size
    self._output_dim = output_dim
    self.n_feed_frame = n_feed_frame
    self.min_iters = min_iters
    self._dtype = dtype or backend.floatx()
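Examples #3 and #4 hinge on the same idiom: dtype or backend.floatx() falls back to the global Keras float type whenever the caller passes dtype=None. A standalone sketch of the pattern (the function name is illustrative):

from tensorflow.python.keras import backend

def resolve_dtype(dtype=None):
    # Fall back to the global Keras default ('float32' unless
    # set_floatx() was called) when no explicit dtype is given.
    return dtype or backend.floatx()

print(resolve_dtype())            # 'float32'
print(resolve_dtype('float16'))   # 'float16'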
Example #5
Source File: modules.py From tacotron2 with BSD 3-Clause "New" or "Revised" License
def __init__(self, num_symbols, embedding_dim, index_offset=0, output_dtype=None,
             trainable=True, name=None, dtype=None, **kwargs):
    self._dtype = dtype or backend.floatx()
    # To ensure self.dtype is float type, set dtype explicitly.
    super(Embedding, self).__init__(name=name, trainable=trainable,
                                    dtype=self._dtype, **kwargs)
    self._num_symbols = num_symbols
    self._embedding_dim = embedding_dim
    self._output_dtype = output_dtype or backend.floatx()
    self.index_offset = tf.convert_to_tensor(index_offset, dtype=tf.int64)
Example #6
Source File: tacotron_v2.py From tacotron2 with BSD 3-Clause "New" or "Revised" License
def __init__(self, num_units, memory, memory_sequence_length,
             attention_kernel, attention_filters,
             smoothing=False, cumulative_weights=True,
             dtype=None, name="LocationSensitiveAttention"):
    probability_fn = self._smoothing_normalization if smoothing else None
    super(LocationSensitiveAttention, self).__init__(
        num_units=num_units,
        memory=memory,
        memory_sequence_length=memory_sequence_length,
        probability_fn=probability_fn,
        dtype=dtype,
        name=name)
    self._cumulative_weights = cumulative_weights
    self._dtype = dtype or backend.floatx()
    self.location_convolution = tf.layers.Conv1D(
        filters=attention_filters,
        kernel_size=attention_kernel,
        padding="SAME",
        use_bias=True,
        bias_initializer=tf.zeros_initializer(dtype=memory.dtype),
        name="location_features_convolution",
        dtype=dtype)
    self.location_layer = tf.layers.Dense(
        units=num_units,
        use_bias=False,
        name="location_features_layer",
        dtype=dtype)
Example #7
Source File: utils.py From camera-trap-classifier with MIT License
from tensorflow.python.keras import backend as K

def build_masked_loss(loss_function, mask_value=-1):
    """Builds a loss function that masks based on targets.

    Args:
        loss_function: The loss function to mask.
        mask_value: The value to mask in the targets.

    Returns:
        function: a loss function that acts like loss_function
        with masked inputs.
    """
    def masked_loss_function(y_true, y_pred, mask_value=mask_value):
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return loss_function(y_true * mask, y_pred * mask)

    return masked_loss_function
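A hypothetical usage sketch (the model and the choice of loss are illustrative, not from the project): wrapping a standard loss this way zeroes out the contribution of any target position equal to the mask value:

import tensorflow as tf
from tensorflow.python.keras import backend as K

# Hypothetical: mask out label positions equal to -1 in a multi-label setup.
masked_bce = build_masked_loss(K.binary_crossentropy, mask_value=-1)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(5, activation='sigmoid', input_shape=(8,)),
])
model.compile(optimizer='adam', loss=masked_bce)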
Example #8
Source File: adamw.py From keras_imagenet with MIT License
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]
    wd = self.wd * self.wd_normalizer  # decoupled weight decay (4/6)

    lr = self.lr
    if self.initial_decay > 0:
        lr = lr * (1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                         K.dtype(self.decay))))
    eta_t = lr / self.init_lr  # decoupled weight decay (5/6)

    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
        t = math_ops.cast(self.iterations, K.floatx())
    # Bias corrections according to the Adam paper.
    lr_t = lr * (K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
                 (1. - math_ops.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
        p_t -= eta_t * wd * p  # decoupled weight decay (6/6)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
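For intuition about the bias correction applied above: the factor sqrt(1 - beta_2^t) / (1 - beta_1^t) compensates for the zero-initialized moment estimates and approaches 1 as t grows, so lr_t converges to the base learning rate. A plain-Python illustration (the values are just the common Adam defaults):

import math

lr, beta_1, beta_2 = 0.001, 0.9, 0.999
for t in (1, 10, 100, 1000):
    lr_t = lr * math.sqrt(1. - beta_2 ** t) / (1. - beta_1 ** t)
    print(t, lr_t)  # tends toward lr as t grows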
Example #9
Source File: keras.py From estimator with Apache License 2.0
import tensorflow as tf
from tensorflow.python.keras import backend as K

def _cast_tensor_to_floatx(x):
    """Cast tensor to keras's floatx dtype if it is not already the same dtype."""
    if x.dtype == K.floatx():
        return x
    else:
        return tf.cast(x, K.floatx())
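A short illustration of the helper's effect (the input tensor is illustrative):

import tensorflow as tf

x64 = tf.constant([1.0, 2.0], dtype=tf.float64)
x32 = _cast_tensor_to_floatx(x64)
print(x32.dtype)  # float32, assuming floatx() has not been changed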
Example #10
Source File: helpers.py From self-attention-tacotron with BSD 3-Clause "New" or "Revised" License
def __init__(self, batch_size, mgc_output_dim, lf0_output_dim, r,
             n_feed_frame=1, min_iters=10, dtype=None):
    assert n_feed_frame <= r
    self._batch_size = batch_size
    self._mgc_output_dim = mgc_output_dim
    self._lf0_output_dim = lf0_output_dim
    self.r = r
    self.n_feed_frame = n_feed_frame
    self.min_iters = min_iters
    self._dtype = dtype or backend.floatx()
Example #11
Source File: layers.py From neuron with GNU General Public License v3.0
def __init__(self, shape, my_initializer='RandomNormal', dtype=None,
             name=None, mult=1.0, **kwargs):
    # some input checking
    if not name:
        prefix = 'local_param'
        name = prefix + '_' + str(backend.get_uid(prefix))

    if not dtype:
        dtype = backend.floatx()

    self.shape = [1, *shape]
    self.my_initializer = my_initializer
    self.mult = mult

    if not name:
        prefix = 'param'
        name = '%s_%d' % (prefix, K.get_uid(prefix))

    Layer.__init__(self, name=name, **kwargs)

    # Create a trainable weight variable for this layer.
    with K.name_scope(self.name):
        self.kernel = self.add_weight(name='kernel',
                                      shape=shape,
                                      initializer=self.my_initializer,
                                      dtype=dtype,
                                      trainable=True)

    # prepare output tensor, which is essentially the kernel.
    output_tensor = K.expand_dims(self.kernel, 0) * self.mult
    output_tensor._keras_shape = self.shape
    output_tensor._uses_learning_phase = False
    output_tensor._keras_history = base_layer.KerasHistory(self, 0, 0)
    output_tensor._batch_input_shape = self.shape

    self.trainable = True
    self.built = True
    self.is_placeholder = False

    # create new node
    tensorflow.python.keras.engine.base_layer.node_module.Node(
        self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=[],
        output_tensors=[output_tensor],
        input_masks=[],
        output_masks=[None],
        input_shapes=[],
        output_shapes=self.shape)
Example #12
Source File: data_preprocesser.py From BCNN-keras-clean with MIT License
def _get_batches_of_transformed_samples(self, index_array):
    batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=floatx())
    grayscale = self.color_mode == 'grayscale'

    # Build batch of image data
    for i, j in enumerate(index_array):
        fname = self.filenames[j]
        img = load_img(os.path.join(self.directory, fname),
                       grayscale=grayscale,
                       target_size=None,
                       interpolation=self.interpolation)
        x = img_to_array(img, data_format=self.data_format)
        # Pillow images should be closed after `load_img`, but not PIL images.
        if hasattr(img, 'close'):
            img.close()
        x = self.image_data_generator.standardize(x)
        batch_x[i] = x

    # Optionally save augmented images to disk for debugging purposes
    if self.save_to_dir:
        for i, j in enumerate(index_array):
            img = array_to_img(batch_x[i], self.data_format, scale=True)
            fname = '{prefix}_{index}_{hash}.{format}'.format(
                prefix=self.save_prefix,
                index=j,
                hash=np.random.randint(1e7),
                format=self.save_format)
            img.save(os.path.join(self.save_to_dir, fname))

    # Build batch of labels
    if self.class_mode == 'input':
        batch_y = batch_x.copy()
    elif self.class_mode == 'sparse':
        batch_y = self.classes[index_array]
    elif self.class_mode == 'binary':
        batch_y = self.classes[index_array].astype(floatx())
    elif self.class_mode == 'categorical':
        batch_y = np.zeros((len(batch_x), self.num_classes), dtype=floatx())
        for i, label in enumerate(self.classes[index_array]):
            batch_y[i, label] = 1.
    else:
        return batch_x
    return batch_x, batch_y
Example #13
Source File: radam.py From multi-label-classification with MIT License
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = []

    lr = self.lr
    if self.initial_decay > 0:
        lr = lr * (  # pylint: disable=g-no-augmented-assignment
            1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                  K.dtype(self.decay))))

    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
        t = math_ops.cast(self.iterations, K.floatx())

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    if self.amsgrad:
        vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    else:
        vhats = [K.zeros(1) for _ in params]
    self.weights = [self.iterations] + ms + vs + vhats

    beta_1_power = math_ops.pow(self.beta_1, t)
    beta_2_power = math_ops.pow(self.beta_2, t)
    rho_t = self.rho_inf - 2.0 * t * beta_2_power / (1.0 - beta_2_power)
    lr_t = tf.where(rho_t >= 5.0,
                    K.sqrt((rho_t - 4.) * (rho_t - 2.) * self.rho_inf /
                           ((self.rho_inf - 4.) * (self.rho_inf - 2.) * rho_t))
                    * lr * (K.sqrt(1. - beta_2_power) / (1. - beta_1_power)),
                    self.warmup_coef * lr / (1. - beta_1_power))

    for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
        if self.amsgrad:
            vhat_t = math_ops.maximum(vhat, v_t)
            p_t = p - lr_t * tf.where(rho_t >= 5.0,
                                      m_t / (K.sqrt(vhat_t) + self.epsilon),
                                      m_t)
            self.updates.append(state_ops.assign(vhat, vhat_t))
        else:
            p_t = p - lr_t * tf.where(rho_t >= 5.0,
                                      m_t / (K.sqrt(v_t) + self.epsilon),
                                      m_t)

        self.updates.append(state_ops.assign(m, m_t))
        self.updates.append(state_ops.assign(v, v_t))
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(state_ops.assign(p, new_p))
    return self.updates
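The rho_t >= 5.0 branch is the RAdam rectification gate: once the variance estimate is considered tractable, the rectified adaptive step is used; before that, the update falls back to a warmup step without the adaptive denominator. Assuming the enclosing class is named RAdam and takes Adam-style constructor arguments (an assumption; the class definition is not shown in this excerpt), usage would look like any Keras optimizer:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='softmax', input_shape=(20,)),
])
# Hypothetical: `RAdam` and its constructor signature are assumed here.
model.compile(optimizer=RAdam(lr=1e-3), loss='categorical_crossentropy')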