Python tensorflow.compat.v2.ones() Examples
The following are 30 code examples of tensorflow.compat.v2.ones(). You can go to the original project or source file by following the links above each example.
You may also want to check out all the other available functions and classes of the tensorflow.compat.v2 module.
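For orientation before the examples, here is a minimal, self-contained sketch of the call itself; the shapes and dtypes below are chosen purely for illustration.

import tensorflow.compat.v2 as tf

# A 2x3 matrix of ones with an explicit dtype.
ones_matrix = tf.ones([2, 3], dtype=tf.float64)

# The shape may also be a dynamic tensor, e.g. copied from another tensor.
template = tf.zeros([4, 5])
ones_like_template = tf.ones(tf.shape(template))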
Example #1
Source File: brownian_motion_utils.py From tf-quant-finance with Apache License 2.0 | 6 votes |
def outer_multiply(x, y):
  """Performs an outer multiplication of two tensors.

  Given two `Tensor`s, `S` and `T` of shape `s` and `t` respectively, the outer
  product `P` is a `Tensor` of shape `s + t` whose components are given by:

  ```none
  P_{i1,...ik, j1, ... , jm} = S_{i1...ik} T_{j1, ... jm}
  ```

  Args:
    x: A `Tensor` of any shape and numeric dtype.
    y: A `Tensor` of any shape and the same dtype as `x`.

  Returns:
    outer_product: A `Tensor` of shape Shape[x] + Shape[y] and the same dtype
      as `x`.
  """
  x_shape = tf.shape(x)
  padded_shape = tf.concat(
      [x_shape, tf.ones(tf.rank(y), dtype=x_shape.dtype)], axis=0)
  return tf.reshape(x, padded_shape) * y
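A hedged usage sketch of the reshape trick above, assuming the definition just shown and `import tensorflow.compat.v2 as tf`; the shapes are illustrative only. Padding the shape of `x` with ones lets broadcasting against `y` produce a result whose shape is the concatenation of the two input shapes.

# Hypothetical usage of outer_multiply.
x = tf.constant([[1., 2., 3.], [4., 5., 6.]])   # shape [2, 3]
y = tf.constant([10., 100.])                    # shape [2]
p = outer_multiply(x, y)                        # shape [2, 3, 2]
# p[i, j, k] == x[i, j] * y[k]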
Example #2
Source File: __init__.py From language with Apache License 2.0 | 6 votes |
def nonneg_crossentropy(expr, target):
  """A cross entropy operator that is appropriate for NQL outputs.

  Query expressions often evaluate to sparse vectors. This evaluates cross
  entropy safely.

  Args:
    expr: a Tensorflow expression for some predicted values.
    target: a Tensorflow expression for target values.

  Returns:
    Tensorflow expression for cross entropy.
  """
  expr_replacing_0_with_1 = \
      tf.where(expr > 0, expr, tf.ones(tf.shape(input=expr), tf.float32))
  cross_entropies = tf.reduce_sum(
      input_tensor=-target * tf.math.log(expr_replacing_0_with_1), axis=1)
  return tf.reduce_mean(input_tensor=cross_entropies, axis=0)
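A small sketch of why the zero-replacement matters, assuming the function above and `import tensorflow.compat.v2 as tf`; the prediction and target values are made up for illustration.

# Illustrative-only call: `expr` rows are sparse, nonnegative predicted scores
# and `target` rows are the corresponding target distributions.
expr = tf.constant([[0.0, 0.7, 0.3], [0.5, 0.0, 0.5]])
target = tf.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
loss = nonneg_crossentropy(expr, target)
# Zeros in `expr` are replaced by ones before the log, so they contribute
# -target * log(1) = 0 instead of producing NaN from 0 * log(0).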
Example #3
Source File: optimizer_test.py From tf-quant-finance with Apache License 2.0 | 6 votes |
def test_lbfgs_minimize(self):
  """Use L-BFGS algorithm to optimize randomly generated quadratic bowls."""
  np.random.seed(12345)
  dim = 10
  batches = 50
  minima = np.random.randn(batches, dim)
  scales = np.exp(np.random.randn(batches, dim))

  @tff_math.make_val_and_grad_fn
  def quadratic(x):
    return tf.reduce_sum(input_tensor=scales * (x - minima) ** 2, axis=-1)

  start = tf.ones((batches, dim), dtype='float64')
  results = self.evaluate(tff_math.optimizer.lbfgs_minimize(
      quadratic,
      initial_position=start,
      stopping_condition=tff_math.optimizer.converged_any,
      tolerance=1e-8))
  self.assertTrue(results.converged.any())
  self.assertEqual(results.position.shape, minima.shape)
  self.assertNDArrayNear(
      results.position[results.converged], minima[results.converged], 1e-5)
Example #4
Source File: __init__.py From language with Apache License 2.0 | 6 votes |
def nonneg_softmax(expr, replace_nonpositives=-10):
  """A softmax operator that is appropriate for NQL outputs.

  NeuralQueryExpressions often evaluate to sparse vectors of small, nonnegative
  values. Softmax for those is dominated by zeros, so this is a fix. This also
  fixes the problem that minibatches for NQL are one example per column, not
  one example per row.

  Args:
    expr: a Tensorflow expression for some predicted values.
    replace_nonpositives: will replace zeros with this value before computing
      softmax.

  Returns:
    Tensorflow expression for softmax.
  """
  if replace_nonpositives != 0.0:
    ones = tf.ones(tf.shape(input=expr), tf.float32)
    expr = tf.where(expr > 0.0, expr, ones * replace_nonpositives)
  return tf.nn.softmax(expr)
Example #5
Source File: utils_test.py From valan with Apache License 2.0 | 6 votes |
def testBatchApply(self):
  time_dim = 4
  batch_dim = 5
  inputs = {
      'a': tf.zeros(shape=(time_dim, batch_dim)),
      'b': {
          'b_1': tf.ones(shape=(time_dim, batch_dim, 9, 10)),
          'b_2': tf.ones(shape=(time_dim, batch_dim, 6)),
      }
  }

  def f(tensors):
    np.testing.assert_array_almost_equal(
        np.zeros(shape=(time_dim * batch_dim)), tensors['a'].numpy())
    np.testing.assert_array_almost_equal(
        np.ones(shape=(time_dim * batch_dim, 9, 10)),
        tensors['b']['b_1'].numpy())
    np.testing.assert_array_almost_equal(
        np.ones(shape=(time_dim * batch_dim, 6)), tensors['b']['b_2'].numpy())
    return tf.ones(shape=(time_dim * batch_dim, 2))

  result = utils.batch_apply(f, inputs)
  np.testing.assert_array_almost_equal(
      np.ones(shape=(time_dim, batch_dim, 2)), result.numpy())
Example #6
Source File: euler_sampling_test.py From tf-quant-finance with Apache License 2.0 | 6 votes |
def test_sample_paths_dtypes(self):
  """Sampled paths have the expected dtypes."""
  for dtype in [np.float32, np.float64]:
    drift_fn = lambda t, x: tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)
    vol_fn = lambda t, x: t * tf.ones([1, 1], dtype=t.dtype)

    paths = self.evaluate(
        euler_sampling.sample(
            dim=1,
            drift_fn=drift_fn,
            volatility_fn=vol_fn,
            times=[0.1, 0.2],
            num_samples=10,
            initial_state=[0.1],
            time_step=0.01,
            seed=123,
            dtype=dtype))

    self.assertEqual(paths.dtype, dtype)
Example #7
Source File: utils_test.py From tf-quant-finance with Apache License 2.0 | 6 votes |
def test_maybe_update_along_axis(self, dtype):
  """Tests that the values are updated correctly."""
  tensor = tf.ones([5, 4, 3, 2], dtype=dtype)
  new_tensor = tf.zeros([5, 4, 1, 2], dtype=dtype)

  @tf.function
  def maybe_update_along_axis(do_update):
    return utils.maybe_update_along_axis(
        tensor=tensor, new_tensor=new_tensor, axis=1, ind=2,
        do_update=do_update)

  updated_tensor = maybe_update_along_axis(True)
  with self.subTest(name='Shape'):
    self.assertEqual(updated_tensor.shape, tensor.shape)
  with self.subTest(name='UpdatedVals'):
    self.assertAllEqual(updated_tensor[:, 2, :, :],
                        tf.zeros_like(updated_tensor[:, 2, :, :]))
  with self.subTest(name='NotUpdatedVals'):
    self.assertAllEqual(updated_tensor[:, 1, :, :],
                        tf.ones_like(updated_tensor[:, 2, :, :]))
  with self.subTest(name='DoNotUpdateVals'):
    not_updated_tensor = maybe_update_along_axis(False)
    self.assertAllEqual(not_updated_tensor, tensor)
Example #8
Source File: base_agent_test.py From valan with Apache License 2.0 | 6 votes |
def _neck(self, torso_output, state):
  # Verify state. It could have been reset if done was true.
  expected_state = np.copy(self._current_state.numpy())
  done = self._done[self._timestep]
  for i, d in enumerate(done):
    if d:
      expected_state[i] = np.zeros(self._init_state_size)
  np.testing.assert_array_almost_equal(expected_state, state.numpy())
  # Verify torso_output.
  expected_torso_output = np.concatenate([
      np.ones(shape=(self._batch_size, 50)),
      np.zeros(shape=(self._batch_size, 50))
  ], axis=1)
  np.testing.assert_array_almost_equal(expected_torso_output,
                                       torso_output.numpy())
  self._timestep += 1
  self._current_state = state + 1
  return (tf.ones([self._batch_size, 6]) * self._timestep,
          self._current_state)
Example #9
Source File: array_ops.py From trax with Apache License 2.0 | 6 votes |
def tri(N, M=None, k=0, dtype=None):  # pylint: disable=invalid-name,missing-docstring
  M = M if M is not None else N
  if dtype is not None:
    dtype = utils.result_type(dtype)
  else:
    dtype = dtypes.default_float_type()

  if k < 0:
    lower = -k - 1
    if lower > N:
      r = tf.zeros([N, M], dtype)
    else:
      # Keep as tf bool, since we create an upper triangular matrix and invert
      # it.
      o = tf.ones([N, M], dtype=tf.bool)
      r = tf.cast(tf.math.logical_not(tf.linalg.band_part(o, lower, -1)),
                  dtype)
  else:
    o = tf.ones([N, M], dtype)
    if k > M:
      r = o
    else:
      r = tf.linalg.band_part(o, -1, k)
  return utils.tensor_to_ndarray(r)
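To make the `k >= 0` branch concrete, here is a small sketch in plain TensorFlow outside the trax wrapper (assuming `import tensorflow.compat.v2 as tf`; the 4x4 size is illustrative): `tf.linalg.band_part` keeps the diagonal band of an all-ones matrix, which is exactly the triangular mask `tri` returns.

o = tf.ones([4, 4], dtype=tf.float32)
lower_plus_k = tf.linalg.band_part(o, -1, 1)  # ones on and below the first superdiagonal
# [[1., 1., 0., 0.],
#  [1., 1., 1., 0.],
#  [1., 1., 1., 1.],
#  [1., 1., 1., 1.]]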
Example #10
Source File: array_ops.py From trax with Apache License 2.0 | 6 votes |
def ones(shape, dtype=float):  # pylint: disable=redefined-outer-name
  """Returns an ndarray with the given shape and type filled with ones.

  Args:
    shape: A fully defined shape. Could be
      - NumPy array or a python scalar, list or tuple of integers,
      - TensorFlow tensor/ndarray of integer type and rank <=1.
    dtype: Optional, defaults to float. The type of the resulting ndarray.
      Could be a python type, a NumPy type or a TensorFlow `DType`.

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = utils.result_type(dtype)
  if isinstance(shape, arrays_lib.ndarray):
    shape = shape.data
  return arrays_lib.tensor_to_ndarray(tf.ones(shape, dtype=dtype))
Example #11
Source File: generic_ito_process_test.py From tf-quant-finance with Apache License 2.0 | 6 votes |
def test_sample_paths_dtypes(self):
  """Sampled paths have the expected dtypes."""
  for dtype in [np.float32, np.float64]:
    drift_fn = lambda t, x: tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)
    vol_fn = lambda t, x: t * tf.ones([1, 1], dtype=t.dtype)
    process = GenericItoProcess(
        dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=dtype)

    paths = self.evaluate(
        process.sample_paths(
            times=[0.1, 0.2],
            num_samples=10,
            initial_state=[0.1],
            time_step=0.01,
            seed=123))

    self.assertEqual(paths.dtype, dtype)

# Several tests below are unit tests for GenericItoProcess.fd_solver_backward:
# they mock out the pde solver and check only the conversion of SDE to PDE,
# but not PDE solving. There are also integration tests further below.
Example #12
Source File: utils_test.py From valan with Apache License 2.0 | 5 votes |
def testGetRowNestedTensor(self):
  x = {
      'a': tf.constant([[0., 0.], [1., 1.]]),
      'b': {
          'b_1': tf.ones(shape=(2, 3))
      }
  }
  result = utils.get_row_nested_tensor(x, 1)
  np.testing.assert_array_almost_equal(
      np.array([1., 1.]), result['a'].numpy())
  np.testing.assert_array_almost_equal(
      np.array([1., 1., 1.]), result['b']['b_1'].numpy())
Example #13
Source File: __init__.py From language with Apache License 2.0 | 5 votes |
def jump_to_all(self, type_name):
  """A universal set containing all entities of some type.

  Args:
    type_name: the string type name

  Returns:
    A NeuralQueryExpression for an all-ones vector for the types.
  """
  return self.context.all(type_name)
Example #14
Source File: __init__.py From language with Apache License 2.0 | 5 votes |
def all(self, type_name):
  """A universal set containing all entities of some type.

  Args:
    type_name: the string type name

  Returns:
    A NeuralQueryExpression for an all-ones vector for the types.
  """
  provenance = NQExprProvenance(operation='all', args=(type_name, None))
  np_vec = np.ones((1, self.get_max_id(type_name)), dtype='float32')
  return self.as_nql(np_vec, type_name, provenance)
Example #15
Source File: swap_curve_fit.py From tf-quant-finance with Apache License 2.0 | 5 votes |
def _initialize_instrument_weights(float_times, fixed_times, dtype):
  """Function to compute default initial weights for optimization."""
  weights = tf.ones(len(float_times), dtype=dtype)
  one = tf.ones([], dtype=dtype)
  float_times_last = tf.stack([times[-1] for times in float_times])
  fixed_times_last = tf.stack([times[-1] for times in fixed_times])
  weights = tf.maximum(one / float_times_last, one / fixed_times_last)
  weights = tf.minimum(one, weights)
  return tf.unstack(weights, name='instrument_weights')
Example #16
Source File: zero_coupon_bond_option.py From tf-quant-finance with Apache License 2.0 | 5 votes |
def _cumprod_using_matvec(input_tensor):
  """Computes cumprod using matrix algebra."""
  dtype = input_tensor.dtype
  axis_length = input_tensor.shape.as_list()[-1]
  ones = tf.ones([axis_length, axis_length], dtype=dtype)
  lower_triangular = tf.linalg.band_part(ones, -1, 0)
  cumsum = tf.linalg.matvec(lower_triangular, tf.math.log(input_tensor))
  return tf.math.exp(cumsum)
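The lower-triangular all-ones matrix turns the matvec into a running sum of logs, so exponentiating recovers a cumulative product. A small sanity sketch, assuming the helper above and `import tensorflow.compat.v2 as tf`; the values are arbitrary and strictly positive, since the trick relies on taking logs.

x = tf.constant([1.5, 2.0, 0.5, 4.0], dtype=tf.float64)
via_matvec = _cumprod_using_matvec(x)
reference = tf.math.cumprod(x)
# Both should be close to [1.5, 3.0, 1.5, 6.0] up to floating-point error.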
Example #17
Source File: generic_ito_process_test.py From tf-quant-finance with Apache License 2.0 | 5 votes |
def test_sample_paths_2d(self):
  """Tests path properties for a 2-dimensional Ito process.

  We construct the following Ito processes.

  dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2
  dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2

  mu_1, mu_2 are constants.
  s_ij = a_ij t + b_ij

  For this process, the expected value at time t is
  (x_0)_i + 2/3 * mu_i * t^1.5.
  """
  mu = np.array([0.2, 0.7])
  a = np.array([[0.4, 0.1], [0.3, 0.2]])
  b = np.array([[0.33, -0.03], [0.21, 0.5]])

  def drift_fn(t, x):
    return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)

  def vol_fn(t, x):
    del x
    return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)

  num_samples = 10000
  process = GenericItoProcess(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn)
  times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
  x0 = np.array([0.1, -1.1])
  paths = self.evaluate(
      process.sample_paths(
          times,
          num_samples=num_samples,
          initial_state=x0,
          time_step=0.01,
          seed=12134))

  self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)
  means = np.mean(paths, axis=0)
  times = np.reshape(times, [-1, 1])
  expected_means = x0 + (2.0 / 3.0) * mu * np.power(times, 1.5)
  self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2)
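The expected-means assertion at the end of this test follows from integrating the drift term, since the Brownian increments have zero mean. With the drift mu_i * sqrt(t) used above:

E[X_i(t)] = x_{0,i} + \mu_i \int_0^t \sqrt{s}\, ds = x_{0,i} + \tfrac{2}{3}\, \mu_i\, t^{3/2}

which is exactly the `expected_means = x0 + (2.0 / 3.0) * mu * np.power(times, 1.5)` line.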
Example #18
Source File: joined_ito_process_test.py From tf-quant-finance with Apache License 2.0 | 5 votes |
def test_inconsistent_dtype(self):
  """Tests that all processes should have the same dtype."""
  def drift_fn(t, x):
    del t, x
    return -1. / 2

  def vol_fn(t, x):
    del t
    return tf.ones([1, 1], dtype=x.dtype)

  process_1 = tff.models.GenericItoProcess(
      dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=np.float32)
  process_2 = tff.models.GenericItoProcess(
      dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=np.float64)
  with self.assertRaises(ValueError):
    tff.models.JoinedItoProcess([process_1, process_2], [[1.0], [1.0]])
Example #19
Source File: nest_map_test.py From agents with Apache License 2.0 | 5 votes |
def testIncompatibleStructureInputs(self):
  with self.assertRaisesRegex(
      ValueError,
      r'`nested_layers` and `input_spec` do not have matching structures'):
    nest_map.NestMap(
        tf.keras.layers.Dense(8),
        input_spec={'ick': tf.TensorSpec(8, tf.float32)})

  with self.assertRaisesRegex(
      ValueError,
      r'`inputs` and `self.nested_layers` do not have matching structures'):
    net = nest_map.NestMap(tf.keras.layers.Dense(8))
    net.create_variables({'ick': tf.TensorSpec((1,), dtype=tf.float32)})

  with self.assertRaisesRegex(
      ValueError,
      r'`inputs` and `self.nested_layers` do not have matching structures'):
    net = nest_map.NestMap(tf.keras.layers.Dense(8))
    net({'ick': tf.constant([[1.0]])})

  with self.assertRaisesRegex(
      ValueError,
      r'`network_state` and `state_spec` do not have matching structures'):
    net = nest_map.NestMap(
        tf.keras.layers.LSTM(8, return_state=True, return_sequences=True))
    net(tf.ones((1, 2)), network_state=(tf.ones((1, 1)), ()))
Example #20
Source File: utils_test.py From valan with Apache License 2.0 | 5 votes |
def testTimeBatchDim(self):
  x = tf.ones(shape=(2, 3))
  y = tf.ones(shape=(2, 3, 4))
  x, y = utils.add_time_batch_dim(x, y)
  np.testing.assert_equal((1, 1, 2, 3), x.shape)
  np.testing.assert_equal((1, 1, 2, 3, 4), y.shape)

  x, y = utils.remove_time_batch_dim(x, y)
  np.testing.assert_equal((2, 3), x.shape)
  np.testing.assert_equal((2, 3, 4), y.shape)
Example #21
Source File: extensions_test.py From trax with Apache License 2.0 | 5 votes |
def testAvgPool(self):
  y = extensions.avg_pool(np.ones([5, 320, 480, 3]), [3, 5], [2, 3], "VALID")
  self.assertAllEqual(
      y,
      tf.nn.pool(
          input=tf.ones([5, 320, 480, 3]),
          window_shape=[3, 5],
          pooling_type="AVG",
          padding="VALID",
          strides=[2, 3],
      ))
Example #22
Source File: utils_test.py From valan with Apache License 2.0 | 5 votes |
def testGatherFromDict(self):
  one_d_tensor_dict = {
      0: tf.ones(shape=(5)) * 0.,
      1: tf.ones(shape=(5)) * 1.,
      2: tf.ones(shape=(5)) * 2.,
      3: tf.ones(shape=(5)) * 3.,
  }
  choice = tf.constant([3, 0, 1, 1, 2])
  np.testing.assert_array_almost_equal(
      np.array([3., 0., 1., 1., 2.]),
      utils.gather_from_dict(one_d_tensor_dict, choice))

  choice = tf.constant([1, 1, 1, 1, 1])
  np.testing.assert_array_almost_equal(
      np.array([1., 1., 1., 1., 1.]),
      utils.gather_from_dict(one_d_tensor_dict, choice))

  one_d_tensor_dict = {
      'a': tf.ones(shape=(5)) * 0.,
      'b': tf.ones(shape=(5)) * 1.,
      'c': tf.ones(shape=(5)) * 2.,
      'd': tf.ones(shape=(5)) * 3.,
  }
  choice = tf.constant(['a', 'b', 'c', 'd', 'b'])
  np.testing.assert_array_almost_equal(
      np.array([0., 1., 2., 3., 1.]),
      utils.gather_from_dict(one_d_tensor_dict, choice))

  two_d_tensor_dict = {
      0: tf.ones(shape=(5, 2)) * 0.,
      1: tf.ones(shape=(5, 2)) * 1.,
      2: tf.ones(shape=(5, 2)) * 2.,
      3: tf.ones(shape=(5, 2)) * 3.,
  }
  choice = tf.constant([3, 0, 1, 1, 2])
  np.testing.assert_array_almost_equal(
      np.array([[3., 3.], [0., 0.], [1., 1.], [1., 1.], [2., 2.]]),
      utils.gather_from_dict(two_d_tensor_dict, choice))
Example #23
Source File: utils.py From valan with Apache License 2.0 | 5 votes |
def gather_from_dict(tensor_dict, choice):
  """Chooses tensor values along first dimension using given choice.

  If `tensor_dict` = {
    0: zeros(shape=(6)),
    1: ones(shape=(6)),
    2: twos(shape=(6)),
    3: threes(shape=(6))
  }
  and choice = [0, 0, 2, 2, 1, 0]
  then returned tensor is [0., 0., 2., 2., 1., 0.]

  Args:
    tensor_dict: A dict with int keys and tensor values. All tensor values must
      be of same type and shape.
    choice: A 1-d int tensor with number of elements equal to first dimension
      of tensors in `tensor_dict`. The values in the tensor must be valid keys
      in `tensor_dict`.

  Returns:
    A tensor of same type and shape as tensors in `tensor_dict`.
  """
  one_tensor = next(iter(tensor_dict.values()))

  # Check number of elements in `choice`.
  tf.debugging.assert_rank(choice, rank=1)
  tf.debugging.assert_equal(tf.size(choice), tf.shape(one_tensor)[0])

  zeros_tensor = tf.zeros_like(one_tensor)
  final_tensor = zeros_tensor
  for c, t in tensor_dict.items():
    # Check shapes and type.
    tf.debugging.assert_equal(tf.shape(t), tf.shape(one_tensor))
    tf.debugging.assert_type(t, tf_type=one_tensor.dtype)
    final_tensor += tf.compat.v1.where(tf.equal(choice, c), t, zeros_tensor)
  return final_tensor
Example #24
Source File: base_agent_test.py From valan with Apache License 2.0 | 5 votes |
def _torso(self, observation):
  # Verify observation.
  np.testing.assert_equal((self._total_timesteps * self._batch_size, 50),
                          observation[_OBS_KEY_1].shape)
  np.testing.assert_array_almost_equal(
      np.ones((self._total_timesteps * self._batch_size, 50)),
      observation[_OBS_KEY_1])
  np.testing.assert_equal((self._total_timesteps * self._batch_size, 50),
                          observation[_OBS_KEY_0].shape)
  np.testing.assert_array_almost_equal(
      np.zeros((self._total_timesteps * self._batch_size, 50)),
      observation[_OBS_KEY_0])
  return tf.concat([observation[_OBS_KEY_1], observation[_OBS_KEY_0]], axis=1)
Example #25
Source File: base_agent_test.py From valan with Apache License 2.0 | 5 votes |
def _head(self, neck_output):
  # Verify neck_output.
  np.testing.assert_equal((self._total_timesteps * self._batch_size, 6),
                          neck_output.shape)
  arrays = []
  for i in range(self._total_timesteps):
    arrays.append(np.ones((self._batch_size, 6)) * (i + 1))
  expected_neck_output = np.concatenate(arrays, axis=0)
  np.testing.assert_array_almost_equal(expected_neck_output,
                                       neck_output.numpy())
  return common.AgentOutput(
      policy_logits=tf.zeros(
          shape=[self._total_timesteps * self._batch_size, 4]),
      baseline=tf.ones(shape=[self._total_timesteps * self._batch_size]))
Example #26
Source File: base_agent_test.py From valan with Apache License 2.0 | 5 votes |
def testWithMockAgent(self):
  total_timesteps = 3
  batch_size = 4
  done = np.array([[True, True, False, False], [True, False, True, False],
                   [False, False, False, False]])
  init_state = tf.constant([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                            [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0],
                            [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0],
                            [4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0]])
  env_output = common.EnvOutput(
      reward=None,
      done=done,
      observation={
          _OBS_KEY_1: tf.ones([total_timesteps, batch_size, 50]),
          _OBS_KEY_0: tf.zeros([total_timesteps, batch_size, 50]),
      },
      info=None)
  agent = MockAgent(3, 4, init_state, done)
  agent.reset_timestep()
  agent_output, final_state = agent(env_output, init_state)
  np.testing.assert_array_almost_equal(
      np.zeros((total_timesteps, batch_size, 4)), agent_output.policy_logits)
  np.testing.assert_array_almost_equal(
      np.ones((total_timesteps, batch_size)), agent_output.baseline)
  expected_final_state = np.array([[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0],
                                   [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0],
                                   [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0],
                                   [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]])
  np.testing.assert_array_almost_equal(expected_final_state,
                                       final_state.numpy())
Example #27
Source File: base_agent_test.py From valan with Apache License 2.0 | 5 votes |
def testWithMockAgent_DoneAllFalse(self):
  total_timesteps = 3
  batch_size = 4
  done = np.array([[False, False, False, False], [False, False, False, False],
                   [False, False, False, False]])
  init_state = tf.constant([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                            [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0],
                            [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0],
                            [4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0]])
  env_output = common.EnvOutput(
      reward=None,
      done=done,
      observation={
          _OBS_KEY_1: tf.ones([total_timesteps, batch_size, 50]),
          _OBS_KEY_0: tf.zeros([total_timesteps, batch_size, 50]),
      },
      info=None)
  agent = MockAgent(3, 4, init_state, done)
  agent.reset_timestep()
  agent_output, final_state = agent(env_output, init_state)
  np.testing.assert_array_almost_equal(
      np.zeros((total_timesteps, batch_size, 4)), agent_output.policy_logits)
  np.testing.assert_array_almost_equal(
      np.ones((total_timesteps, batch_size)), agent_output.baseline)
  expected_final_state = np.array([[4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0],
                                   [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
                                   [6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0],
                                   [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]])
  np.testing.assert_array_almost_equal(expected_final_state,
                                       final_state.numpy())
Example #28
Source File: testing_utils.py From valan with Apache License 2.0 | 5 votes |
def _torso(self, observation):
  # Verify shapes of observation.
  first_dim = observation['f1'].shape.as_list()[0]
  np.testing.assert_equal((first_dim, 4, 10), observation['f1'].shape)
  np.testing.assert_equal((first_dim, 7, 10, 2), observation['f2'].shape)
  return tf.ones(shape=[first_dim, 50])
Example #29
Source File: testing_utils.py From valan with Apache License 2.0 | 5 votes |
def _neck(self, torso_output, state):
  return tf.ones([tf.shape(torso_output)[0], 6]), state + 1
Example #30
Source File: testing_utils.py From valan with Apache License 2.0 | 5 votes |
def _head(self, neck_output):
  return common.AgentOutput(
      policy_logits=self._logits_layer(neck_output),
      baseline=tf.ones(shape=[tf.shape(neck_output)[0]]))