Python tensorflow.convert_to_tensor() Examples
The following are 30 code examples of tensorflow.convert_to_tensor(). You can go to the original project or source file by following the links above each example, or browse all available functions and classes of the tensorflow module.
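Before the project examples, here is a minimal stand-alone sketch of what tf.convert_to_tensor() does; it assumes TensorFlow 2.x with eager execution, and the variable names t1, t2, t3 are purely illustrative:

import numpy as np
import tensorflow as tf

# A Python list is converted to a tensor; the dtype is inferred (int32 here).
t1 = tf.convert_to_tensor([1, 2, 3])

# A NumPy array is converted; an explicit dtype argument overrides the array's dtype.
t2 = tf.convert_to_tensor(np.arange(4.0), dtype=tf.float32)

# A value that is already a tensor is returned as a tensor with the same value.
t3 = tf.convert_to_tensor(t2)

print(t1.dtype, t2.dtype, t3.dtype)  # int32, float32, float32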
Example #1
Source File: common_layers.py From fine-lm with MIT License | 6 votes |
def shape_list(x):
  """Return list of dims, statically where possible."""
  x = tf.convert_to_tensor(x)

  # If unknown rank, return dynamic shape
  if x.get_shape().dims is None:
    return tf.shape(x)

  static = x.get_shape().as_list()
  shape = tf.shape(x)

  ret = []
  for i in range(len(static)):
    dim = static[i]
    if dim is None:
      dim = shape[i]
    ret.append(dim)
  return ret
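A short usage sketch for shape_list (not part of the original file, written against the TF 1.x graph API the example uses): only the unknown batch dimension falls back to a dynamic tf.shape slice, while the static dimensions stay plain Python ints.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 128, 64])
dims = shape_list(x)
# dims mixes a dynamic scalar tensor (the batch size) with the static ints 128 and 64,
# so the known dimensions can be combined arithmetically before reshaping.
flat = tf.reshape(x, [dims[0], dims[1] * dims[2]])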
Example #2
Source File: utils.py From DOTA_models with Apache License 2.0 | 6 votes |
def GenerateBinomialTable(m):
  """Generate binomial table.

  Args:
    m: the size of the table.

  Returns:
    A two dimensional array T where T[i][j] = (i choose j),
    for 0 <= i, j <= m.
  """
  table = numpy.zeros((m + 1, m + 1), dtype=numpy.float64)
  for i in range(m + 1):
    table[i, 0] = 1
  for i in range(1, m + 1):
    for j in range(1, m + 1):
      v = table[i - 1, j] + table[i - 1, j - 1]
      assert not math.isnan(v) and not math.isinf(v)
      table[i, j] = v
  return tf.convert_to_tensor(table)
Example #3
Source File: losses.py From DOTA_models with Apache License 2.0 | 6 votes |
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                           name='value_l1')
      reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),
                           name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer
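A hedged usage sketch for the regularizer factory above (the variable names, shapes, and weight values are illustrative, and the TF 1.x API from the example is assumed):

import tensorflow as tf

weights = tf.Variable(tf.truncated_normal([256, 10], stddev=0.1), name='weights')
data_loss = tf.constant(0.0)  # stand-in for a real task loss
regularizer = l1_l2_regularizer(weight_l1=1e-4, weight_l2=1e-3)
# The returned closure maps a tensor to a scalar op combining the L1 and L2 penalties.
total_loss = data_loss + regularizer(weights)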
Example #4
Source File: model_train.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def anchor_target_layer(cls_pred, bbox, im_info, scope_name):
    with tf.variable_scope(scope_name) as scope:
        # 'rpn_cls_score', 'gt_boxes', 'im_info'
        rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = \
            tf.py_func(anchor_target_layer_py,
                       [cls_pred, bbox, im_info, [16, ], [16]],
                       [tf.float32, tf.float32, tf.float32, tf.float32])

        rpn_labels = tf.convert_to_tensor(tf.cast(rpn_labels, tf.int32),
                                          name='rpn_labels')
        rpn_bbox_targets = tf.convert_to_tensor(rpn_bbox_targets,
                                                name='rpn_bbox_targets')
        rpn_bbox_inside_weights = tf.convert_to_tensor(rpn_bbox_inside_weights,
                                                       name='rpn_bbox_inside_weights')
        rpn_bbox_outside_weights = tf.convert_to_tensor(rpn_bbox_outside_weights,
                                                        name='rpn_bbox_outside_weights')

        return [rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights]
Example #5
Source File: losses.py From DOTA_models with Apache License 2.0 | 6 votes |
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.name_scope(scope, 'L2Regularizer', [tensor]):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer
Example #6
Source File: losses.py From DOTA_models with Apache License 2.0 | 6 votes |
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.name_scope(scope, 'L1Regularizer', [tensor]):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer
Example #7
Source File: prediction_model.py From DOTA_models with Apache License 2.0 | 6 votes |
def stp_transformation(prev_image, stp_input, num_masks):
  """Apply spatial transformer predictor (STP) to previous image.

  Args:
    prev_image: previous image to be transformed.
    stp_input: hidden layer to be used for computing STN parameters.
    num_masks: number of masks and hence the number of STP transformations.

  Returns:
    List of images transformed by the predicted STP parameters.
  """
  # Only import spatial transformer if needed.
  from spatial_transformer import transformer

  identity_params = tf.convert_to_tensor(
      np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
  transformed = []
  for i in range(num_masks - 1):
    params = slim.layers.fully_connected(
        stp_input, 6, scope='stp_params' + str(i),
        activation_fn=None) + identity_params
    transformed.append(transformer(prev_image, params))

  return transformed
Example #8
Source File: mobilenet.py From DeepLab_v3 with MIT License | 6 votes |
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
  """Applies avg pool to produce 1x1 output.

  NOTE: This function is functionally equivalent to reduce_mean, but it has
  a baked-in average pool, which has better support across hardware.

  Args:
    input_tensor: input tensor
    pool_op: pooling op (avg pool is default)

  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size = tf.convert_to_tensor(
        [1, tf.shape(input_tensor)[1], tf.shape(input_tensor)[2], 1])
  else:
    kernel_size = [1, shape[1], shape[2], 1]
  output = pool_op(
      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover output shape, for unknown shape.
  output.set_shape([None, 1, 1, None])
  return output
Example #9
Source File: focal_loss.py From tf2-yolo3 with Apache License 2.0 | 6 votes |
def binary_focal_loss(y_true, y_pred, gamma, *, pos_weight=None,
                      from_logits=False, label_smoothing=None):
    y_pred = tf.convert_to_tensor(y_pred)
    if not y_pred.dtype.is_floating:
        y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)

    if from_logits:
        return _binary_focal_loss_from_logits(labels=y_true,
                                              logits=y_pred,
                                              gamma=gamma,
                                              pos_weight=pos_weight,
                                              label_smoothing=label_smoothing)
    else:
        return _binary_focal_loss_from_probs(labels=y_true,
                                             p=y_pred,
                                             gamma=gamma,
                                             pos_weight=pos_weight,
                                             label_smoothing=label_smoothing)
Example #10
Source File: losses.py From DOTA_models with Apache License 2.0 | 6 votes |
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularization, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    the L1 loss op.
  """
  with tf.name_scope(scope, 'L1Loss', [tensor]):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Example #11
Source File: mobilenet.py From hierarchical_loc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
  """Applies avg pool to produce 1x1 output.

  NOTE: This function is functionally equivalent to reduce_mean, but it has
  a baked-in average pool, which has better support across hardware.

  Args:
    input_tensor: input tensor
    pool_op: pooling op (avg pool is default)

  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size = tf.convert_to_tensor(
        [1, tf.shape(input_tensor)[1], tf.shape(input_tensor)[2], 1])
  else:
    kernel_size = [1, shape[1], shape[2], 1]
  output = pool_op(
      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover output shape, for unknown shape.
  output.set_shape([None, 1, 1, None])
  return output
Example #12
Source File: vgg_preprocessing.py From DOTA_models with Apache License 2.0 | 6 votes |
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                           align_corners=False)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
Example #13
Source File: losses.py From DOTA_models with Apache License 2.0 | 6 votes |
def l2_loss(tensor, weight=1.0, scope=None):
  """Define a L2Loss, useful for regularization, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for name_scope.

  Returns:
    the L2 loss op.
  """
  with tf.name_scope(scope, 'L2Loss', [tensor]):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Example #14
Source File: tf_example_decoder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testDecodeJpegImage(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  decoded_jpeg = self._DecodeImage(encoded_jpeg)
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/source_id': self._BytesFeature('image_id'),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[fields.InputDataFields.image]
                       .get_shape().as_list()), [None, None, 3])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual(decoded_jpeg,
                        tensor_dict[fields.InputDataFields.image])
    self.assertEqual('image_id',
                     tensor_dict[fields.InputDataFields.source_id])
Example #15
Source File: tf_example_decoder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testDecodeImageKeyAndFilename(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/key/sha256': self._BytesFeature('abc'),
      'image/filename': self._BytesFeature('filename')
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))

  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertEqual('abc', tensor_dict[fields.InputDataFields.key])
    self.assertEqual('filename',
                     tensor_dict[fields.InputDataFields.filename])
Example #16
Source File: tf_example_decoder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testDecodePngImage(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_png = self._EncodeImage(image_tensor, encoding_type='png')
  decoded_png = self._DecodeImage(encoded_png, encoding_type='png')
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_png),
      'image/format': self._BytesFeature('png'),
      'image/source_id': self._BytesFeature('image_id')
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[fields.InputDataFields.image]
                       .get_shape().as_list()), [None, None, 3])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual(decoded_png,
                        tensor_dict[fields.InputDataFields.image])
    self.assertEqual('image_id',
                     tensor_dict[fields.InputDataFields.source_id])
Example #17
Source File: ops_test.py From object_detector_app with MIT License | 6 votes |
def test_normalized_to_image_coordinates(self):
  normalized_boxes = tf.placeholder(tf.float32, shape=(None, 1, 4))
  normalized_boxes_np = np.array([[[0.0, 0.0, 1.0, 1.0]],
                                  [[0.5, 0.5, 1.0, 1.0]]])
  image_shape = tf.convert_to_tensor([1, 4, 4, 3], dtype=tf.int32)
  absolute_boxes = ops.normalized_to_image_coordinates(normalized_boxes,
                                                       image_shape,
                                                       parallel_iterations=2)

  expected_boxes = np.array([[[0, 0, 4, 4]],
                             [[2, 2, 4, 4]]])
  with self.test_session() as sess:
    absolute_boxes = sess.run(absolute_boxes,
                              feed_dict={normalized_boxes: normalized_boxes_np})
    self.assertAllEqual(absolute_boxes, expected_boxes)
Example #18
Source File: tf_example_decoder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testDecodeObjectLabel(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  bbox_classes = [0, 1]
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/object/class/label': self._Int64Feature(bbox_classes),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[
      fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
                      [None])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual(bbox_classes,
                        tensor_dict[fields.InputDataFields.groundtruth_classes])
Example #19
Source File: tf_example_decoder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testDecodeObjectIsCrowd(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  object_is_crowd = [0, 1]
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/object/is_crowd': self._Int64Feature(object_is_crowd),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[
      fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()),
                      [None])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual([bool(item) for item in object_is_crowd],
                        tensor_dict[fields.InputDataFields.groundtruth_is_crowd])
Example #20
Source File: tf_example_decoder_test.py From object_detector_app with MIT License | 6 votes |
def testDecodeObjectDifficult(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  object_difficult = [0, 1]
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/object/difficult': self._Int64Feature(object_difficult),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.Decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[
      fields.InputDataFields.groundtruth_difficult].get_shape().as_list()),
                      [None])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual([bool(item) for item in object_difficult],
                        tensor_dict[fields.InputDataFields.groundtruth_difficult])
Example #21
Source File: ops_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def test_normalized_to_image_coordinates(self):
  normalized_boxes = tf.placeholder(tf.float32, shape=(None, 1, 4))
  normalized_boxes_np = np.array([[[0.0, 0.0, 1.0, 1.0]],
                                  [[0.5, 0.5, 1.0, 1.0]]])
  image_shape = tf.convert_to_tensor([1, 4, 4, 3], dtype=tf.int32)
  absolute_boxes = ops.normalized_to_image_coordinates(normalized_boxes,
                                                       image_shape,
                                                       parallel_iterations=2)

  expected_boxes = np.array([[[0, 0, 4, 4]],
                             [[2, 2, 4, 4]]])
  with self.test_session() as sess:
    absolute_boxes = sess.run(absolute_boxes,
                              feed_dict={normalized_boxes: normalized_boxes_np})
    self.assertAllEqual(absolute_boxes, expected_boxes)
Example #22
Source File: tf_example_decoder_test.py From object_detector_app with MIT License | 6 votes |
def testDecodeObjectIsCrowd(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  object_is_crowd = [0, 1]
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/object/is_crowd': self._Int64Feature(object_is_crowd),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.Decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[
      fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()),
                      [None])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual([bool(item) for item in object_is_crowd],
                        tensor_dict[fields.InputDataFields.groundtruth_is_crowd])
Example #23
Source File: tf_example_decoder_test.py From object_detector_app with MIT License | 6 votes |
def testDecodeObjectLabel(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  bbox_classes = [0, 1]
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/object/class/label': self._Int64Feature(bbox_classes),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.Decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[
      fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
                      [None])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual(bbox_classes,
                        tensor_dict[fields.InputDataFields.groundtruth_classes])
Example #24
Source File: tf_example_decoder_test.py From object_detector_app with MIT License | 6 votes |
def testDecodePngImage(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_png = self._EncodeImage(image_tensor, encoding_type='png')
  decoded_png = self._DecodeImage(encoded_png, encoding_type='png')
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_png),
      'image/format': self._BytesFeature('png'),
      'image/source_id': self._BytesFeature('image_id')
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.Decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[fields.InputDataFields.image]
                       .get_shape().as_list()), [None, None, 3])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual(decoded_png,
                        tensor_dict[fields.InputDataFields.image])
    self.assertEqual('image_id',
                     tensor_dict[fields.InputDataFields.source_id])
Example #25
Source File: tf_example_decoder_test.py From object_detector_app with MIT License | 6 votes |
def testDecodeImageKeyAndFilename(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/key/sha256': self._BytesFeature('abc'),
      'image/filename': self._BytesFeature('filename')
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.Decode(tf.convert_to_tensor(example))

  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertEqual('abc', tensor_dict[fields.InputDataFields.key])
    self.assertEqual('filename',
                     tensor_dict[fields.InputDataFields.filename])
Example #26
Source File: tf_example_decoder_test.py From object_detector_app with MIT License | 6 votes |
def testDecodeJpegImage(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  decoded_jpeg = self._DecodeImage(encoded_jpeg)
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/source_id': self._BytesFeature('image_id'),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.Decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[fields.InputDataFields.image]
                       .get_shape().as_list()), [None, None, 3])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual(decoded_jpeg,
                        tensor_dict[fields.InputDataFields.image])
    self.assertEqual('image_id',
                     tensor_dict[fields.InputDataFields.source_id])
Example #27
Source File: tf_example_decoder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testDecodeObjectDifficult(self):
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  object_difficult = [0, 1]
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': self._BytesFeature(encoded_jpeg),
      'image/format': self._BytesFeature('jpeg'),
      'image/object/difficult': self._Int64Feature(object_difficult),
  })).SerializeToString()

  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))

  self.assertAllEqual((tensor_dict[
      fields.InputDataFields.groundtruth_difficult].get_shape().as_list()),
                      [None])
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual([bool(item) for item in object_difficult],
                        tensor_dict[fields.InputDataFields.groundtruth_difficult])
Example #28
Source File: kfac.py From lirpg with MIT License | 5 votes |
def apply_gradients(self, grads):
    coldOptim = tf.train.MomentumOptimizer(self._cold_lr, self._momentum)

    def coldSGDstart():
        sgd_grads, sgd_var = zip(*grads)

        if self.max_grad_norm is not None:
            sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads, self.max_grad_norm)

        sgd_grads = list(zip(sgd_grads, sgd_var))

        sgd_step_op = tf.assign_add(self.sgd_step, 1)
        coldOptim_op = coldOptim.apply_gradients(sgd_grads)
        if KFAC_DEBUG:
            with tf.control_dependencies([sgd_step_op, coldOptim_op]):
                sgd_step_op = tf.Print(
                    sgd_step_op,
                    [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
        return tf.group(*[sgd_step_op, coldOptim_op])

    kfacOptim_op, qr = self.apply_gradients_kfac(grads)

    def warmKFACstart():
        return kfacOptim_op

    return tf.cond(tf.greater(self.sgd_step, self._cold_iter),
                   warmKFACstart, coldSGDstart), qr
Example #29
Source File: kfac.py From lirpg with MIT License | 5 votes |
def applyStatsEigen(self, eigen_list):
    updateOps = []
    print(('updating %d eigenvalue/vectors' % len(eigen_list)))
    for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
        stats_eigen_var = self.eigen_reverse_lookup[mark]
        updateOps.append(
            tf.assign(stats_eigen_var, tensor, use_locking=True))

    with tf.control_dependencies(updateOps):
        factor_step_op = tf.assign_add(self.factor_step, 1)
        updateOps.append(factor_step_op)
        if KFAC_DEBUG:
            updateOps.append(tf.Print(tf.constant(
                0.), [tf.convert_to_tensor('updated kfac factors')]))
    return updateOps
Example #30
Source File: kfac.py From HardRLWithYoutube with MIT License | 5 votes |
def applyStatsEigen(self, eigen_list):
    updateOps = []
    print(('updating %d eigenvalue/vectors' % len(eigen_list)))
    for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
        stats_eigen_var = self.eigen_reverse_lookup[mark]
        updateOps.append(
            tf.assign(stats_eigen_var, tensor, use_locking=True))

    with tf.control_dependencies(updateOps):
        factor_step_op = tf.assign_add(self.factor_step, 1)
        updateOps.append(factor_step_op)
        if KFAC_DEBUG:
            updateOps.append(tf.Print(tf.constant(
                0.), [tf.convert_to_tensor('updated kfac factors')]))
    return updateOps