Python numpy.array_repr() Examples
The following are 30 code examples of numpy.array_repr(). All of them are taken from the lingvo project; the source file and license for each example are noted above it. You may also want to check out the other available functions and classes of the numpy module.
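
Before the examples, here is a minimal sketch of what numpy.array_repr() itself does (the exact output depends on your numpy version and print options): it returns the canonical string representation of an array, including the array(...) wrapper and, when not implied by the values, the dtype. The tests below use it almost exclusively to log actual tensor values so that golden expected values can be checked and updated.

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)

# array_repr returns the repr string, including the dtype when it is not
# the default; for this array it prints something like:
#   array([[1., 2.],
#          [3., 4.]], dtype=float32)
print(np.array_repr(a))

# max_line_width, precision and suppress_small mirror np.set_printoptions.
print(np.array_repr(a, max_line_width=120, precision=3))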
Example #1
Source File: batch_major_attention_test.py From lingvo with Apache License 2.0
def testTransformerDecoderLayerFProp(self):
  with self.session(use_gpu=True) as sess:
    (query_vec, paddings, aux_vec,
     aux_paddings) = self._TransformerAttentionLayerInputs()
    l = self._ConstructTransformerDecoderLayer()
    layer_output, _ = l.FProp(l.theta, query_vec, paddings, aux_vec,
                              aux_paddings)
    tf.global_variables_initializer().run()
    actual_layer_output = sess.run(layer_output)
    actual_layer_output = np.reshape(actual_layer_output, (10, 4))
    tf.logging.info(np.array_repr(actual_layer_output))
    expected_layer_output = [16.939590, 24.121685, 19.975197, 15.924350]
    self.assertAllClose(expected_layer_output,
                        np.sum(actual_layer_output, axis=0))
Example #2
Source File: layers_test.py From lingvo with Apache License 2.0
def testConvSetLayerFProp(self):
  # pyformat: disable
  # pylint: disable=bad-whitespace,bad-continuation
  expected_output1 = [
      [[[ 1.04307961, 0.        , 1.27613628, 0.        ],
        [ 0.        , 0.        , 0.        , 1.21081829 ]],
       [[ 0.        , 0.18475296, 0.        , 0.        ],
        [ 1.34087086, 2.2726357 , 0.        , 0.        ]]],
      [[[ 0.        , 0.25231963, 0.        , 0.        ],
        [ 1.13677704, 0.        , 0.996117  , 1.836285   ]],
       [[ 0.        , 0.        , 1.04101253, 0.        ],
        [ 0.12628449, 0.37599814, 0.3134549 , 0.51208746 ]]]
  ]
  # pyformat: enable
  # pylint: enable=bad-whitespace,bad-continuation
  actual = self._evalConvSetLayerFProp()
  print(['actual = ', np.array_repr(actual)])
  self.assertAllClose(expected_output1, actual)
Example #3
Source File: layers_test.py From lingvo with Apache License 2.0
def testProjectionLayerWeightNorm(self):
  # pylint: disable=bad-whitespace
  # pyformat: disable
  expected_output = [
      [[ 0.        , 0.36285588],
       [ 0.82909501, 1.07323885],
       [ 0.81163716, 0.        ],
       [ 0.        , 0.04895319]],
      [[ 1.26859784, 0.        ],
       [ 0.56601691, 0.        ],
       [ 0.        , 0.        ],
       [ 0.        , 1.66046333]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  for reshape_to_2d in (False, True):
    actual = self._evalProjectionLayer(
        reshape_to_2d=reshape_to_2d, weight_norm=True)
    if reshape_to_2d:
      expected_output = np.reshape(np.array(expected_output), (-1, 2))
    tf.logging.info('expected = %s', expected_output)
    tf.logging.info('actual = %s', np.array_repr(actual))
    self.assertAllClose(expected_output, actual)
Example #4
Source File: layers_test.py From lingvo with Apache License 2.0
def testProjectionLayerFProp(self):
  # pylint: disable=bad-whitespace
  # pyformat: disable
  expected_output = [
      [[ 0.        , 0.33779466],
       [ 0.4527415 , 0.99911398],
       [ 0.44320837, 0.        ],
       [ 0.        , 0.04557215]],
      [[ 0.69273949, 0.        ],
       [ 0.30908319, 0.        ],
       [ 0.        , 0.        ],
       [ 0.        , 1.54578114]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  for reshape_to_2d in (False, True):
    actual = self._evalProjectionLayer(
        reshape_to_2d=reshape_to_2d, expect_bn_fold_weights=False)
    if reshape_to_2d:
      expected_output = np.reshape(np.array(expected_output), (-1, 2))
    tf.logging.info('expected = %s', expected_output)
    tf.logging.info('actual = %s', np.array_repr(actual))
    self.assertAllClose(expected_output, actual)
Example #5
Source File: layers_test.py From lingvo with Apache License 2.0
def testConvSetLayerFPropQuantized(self):
  # pyformat: disable
  # pylint: disable=bad-whitespace,bad-continuation
  expected_output1 = [
      [[[ 1.04016984, 0.        , 1.28103447, 0.        ],
        [ 0.        , 0.        , 0.        , 1.20986581]],
       [[ 0.        , 0.18681753, 0.        , 0.        ],
        [ 1.35328221, 2.26849842, 0.        , 0.        ]]],
      [[[ 0.        , 0.24909003, 0.        , 0.        ],
        [ 1.14100266, 0.        , 0.98746401, 1.83259094]],
       [[ 0.        , 0.        , 1.04084051, 0.        ],
        [ 0.12736773, 0.38253111, 0.32025862, 0.5159722 ]]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace,bad-continuation
  actual = self._evalConvSetLayerFProp(bn_fold_weights=True, quantized=True)
  # Note that we don't have many ways to verify in a unit test that the
  # quant nodes were added properly; however, if their placement changes,
  # it will very likely perturb the golden values above. If digging deeper,
  # add 'dump_graphdef=True' to the above call and inspect the graphdef:
  # There should be one layer of fake_quant* nodes before the ConcatV2.
  print('actual = ', np.array_repr(actual))
  self.assertAllClose(expected_output1, actual)

# TODO(yonghui): more test for convolution layer
Example #6
Source File: encoder_test.py From lingvo with Apache License 2.0
def testBiEncoderForwardPassWithDropout(self):
  with self.session(use_gpu=False):
    tf.random.set_seed(8372749040)
    p = self._BiEncoderParams()
    p.dropout_prob = 0.5
    mt_enc = encoder.MTEncoderBiRNN(p)
    batch = py_utils.NestedMap()
    batch.ids = tf.transpose(tf.reshape(tf.range(0, 8, 1), [4, 2]))
    batch.paddings = tf.zeros([2, 4])
    enc_out = mt_enc.FPropDefaultTheta(batch).encoded
    self.evaluate(tf.global_variables_initializer())
    actual_enc_out = enc_out.eval()
    print('bi_enc_actual_enc_out_with_dropout',
          np.array_repr(actual_enc_out))
    expected_enc_out = [[[-1.8358192e-05, 1.2103478e-05],
                         [2.9347059e-06, -3.0652325e-06]],
                        [[-8.1282624e-06, 4.5443494e-06],
                         [3.0826509e-06, -5.2950490e-06]],
                        [[-4.6669629e-07, 2.4246765e-05],
                         [-1.5221613e-06, -1.9654153e-06]],
                        [[-1.1511075e-05, 1.9061190e-05],
                         [-5.7250163e-06, 9.2785704e-06]]]
    self.assertAllClose(expected_enc_out, actual_enc_out)
Example #7
Source File: model_test.py From lingvo with Apache License 2.0
def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
  with self.session():
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    p.dtype = dtype
    if fprop_dtype:
      p.fprop_dtype = fprop_dtype
      p.input.dtype = fprop_dtype
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    loss = mdl.loss
    logp = mdl.eval_metrics['log_pplx'][0]
    self.evaluate(tf.global_variables_initializer())
    vals = []
    for _ in range(5):
      vals += [self.evaluate((loss, logp))]
    print('actual vals = %s' % np.array_repr(np.array(vals)))
    self.assertAllClose(vals, [[233.57518, 10.381119],
                               [236.10052, 10.378047],
                               [217.99896, 10.380901],
                               [217.94647, 10.378406],
                               [159.5997, 10.380468]])
Example #8
Source File: model_test.py From lingvo with Apache License 2.0
def testFProp(self, dtype=tf.float32):
  with self.session():
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    p.dtype = dtype
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    loss = mdl.loss
    logp = mdl.eval_metrics['log_pplx'][0]
    self.evaluate(tf.global_variables_initializer())
    vals = []
    for _ in range(3):
      vals += [self.evaluate((loss, logp))]
    print('actual vals = %s' % np.array_repr(np.array(vals)))
    expected_vals = [
        [326.765106, 10.373495],
        [306.018066, 10.373494],
        [280.08429, 10.373492],
    ]
    self.assertAllClose(vals, expected_vals)
Example #9
Source File: layers_test.py From lingvo with Apache License 2.0
def testProjectionLayerFPropWithBias(self):
  # pylint: disable=bad-whitespace
  # pyformat: disable
  expected_output = [
      [[ 4.98987579, 5.03493643],
       [ 5.01192808, 5.0917592 ],
       [ 5.01156807, 4.99741936],
       [ 4.96849394, 5.00982761]],
      [[ 5.02098131, 4.98014927],
       [ 5.00650883, 4.87676954],
       [ 4.98995209, 4.91770315],
       [ 4.95948696, 5.138731  ]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  # Tested without batch_norm because batch_norm will mostly cancel out the
  # effect of bias.
  actual = self._evalProjectionLayer(
      has_bias=True,
      batch_norm=False,
      expect_bn_fold_weights=False,
      activation='RELU6')
  tf.logging.info('expected = %s', expected_output)
  tf.logging.info('actual = %s', np.array_repr(actual))
  self.assertAllClose(expected_output, actual)
Example #10
Source File: layers_test.py From lingvo with Apache License 2.0
def testSeparableConv2DLayerWeightNormFProp(self):
  # pyformat: disable
  # pylint: disable=bad-whitespace
  expected_output = [
      [[[ 0.41837293, 0.        ],
        [ 1.39592457, 0.        ]],
       [[ 0.        , 0.        ],
        [ 0.        , 0.        ]]],
      [[[ 1.20513153, 0.11938372],
        [ 0.1284119 , 0.6927582 ]],
       [[ 0.0227453 , 2.05591369],
        [ 0.        , 0.26530063]]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  actual = self._evalConvLayerFProp(
      weight_norm=True, params_builder=layers.SeparableConv2DLayer.Params)
  print('actual1 = ', np.array_repr(actual))
  self.assertAllClose(expected_output, actual)
Example #11
Source File: layers_test.py From lingvo with Apache License 2.0
def testConv2DLayerWeightNormFProp(self):
  # pyformat: disable
  # pylint: disable=bad-whitespace
  expected_output = [
      [[[ 0.37172362, 0.92405349],
        [ 0.07635488, 0.]],
       [[ 0.35431579, 0.],
        [ 1.94415355, 0.]]],
      [[[ 0.28692839, 0.],
        [ 0.        , 0.]],
       [[ 0.        , 0.87443149],
        [ 0.        , 1.61808443]]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  actual = self._evalConvLayerFProp(weight_norm=True)
  print('actual1 = ', np.array_repr(actual))
  self.assertAllClose(expected_output, actual)
Example #12
Source File: layers_test.py From lingvo with Apache License 2.0
def testConv2DLayerFPropConvLast(self):
  # pyformat: disable
  # pylint: disable=bad-whitespace
  expected_output1 = [
      [[[ 0.22165056,  0.20731729],
        [ 0.09577402, -0.15359652]],
       [[ 0.07151584,  0.03027298],
        [ 0.05370769,  0.0143405 ]]],
      [[[-0.08854639,  0.06143938],
        [-0.37708873,  0.00889082]],
       [[-0.58154356,  0.30798748],
        [-0.37575331,  0.54729235]]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  actual = self._evalConvLayerFProp(conv_last=True)
  print(['ConvLast actual = ', np.array_repr(actual)])
  self.assertAllClose(expected_output1, actual)
Example #13
Source File: rnn_cell_test.py From lingvo with Apache License 2.0
def _testLNLSTMCellFPropBProp(self, params, num_hidden_nodes=None):
  tf.reset_default_graph()
  lstm, _, state1 = self._testLNLSTMCellHelper(params, num_hidden_nodes)
  loss = -tf.math.log(
      tf.sigmoid(
          tf.reduce_sum(tf.square(state1.m)) +
          tf.reduce_sum(state1.m * state1.c * state1.c)))
  grads = tf.gradients(loss, lstm.vars.Flatten())

  with self.session(use_gpu=False):
    self.evaluate(tf.global_variables_initializer())
    m_v, c_v, grads_v = self.evaluate([state1.m, state1.c, grads])

  tf.logging.info('m_v = %s', np.array_repr(m_v))
  tf.logging.info('c_v = %s', np.array_repr(c_v))
  grads_val = py_utils.NestedMap()
  for (n, _), val in zip(lstm.vars.FlattenItems(), grads_v):
    tf.logging.info('%s : %s', n, np.array_repr(val))
    grads_val[n] = val
  return m_v, c_v, grads_val

# pyformat: disable
Example #14
Source File: layers_test.py From lingvo with Apache License 2.0
def testSeparableConv2DLayerFProp(self):
  # pyformat: disable
  # pylint: disable=bad-whitespace
  expected_output1 = [
      [[[ 0.39866772, 0.        ],
        [ 1.36471784, 0.        ]],
       [[ 0.        , 0.        ],
        [ 0.        , 0.        ]]],
      [[[ 1.15356529, 0.1036691 ],
        [ 0.12865055, 0.61244327]],
       [[ 0.03609803, 1.81620765],
        [ 0.        , 0.23052886]]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  actual = self._evalConvLayerFProp(
      params_builder=layers.SeparableConv2DLayer.Params)
  print('actual = ', np.array_repr(actual))
  self.assertAllClose(expected_output1, actual)
Example #15
Source File: batch_major_attention_test.py From lingvo with Apache License 2.0
def testTransformerLayerFPropWithCrossAttention(self, multiplier):
  with self.session(use_gpu=True) as sess:
    (query_vec, _, aux_vec,
     aux_paddings) = self._TransformerAttentionLayerInputs()
    query_vec = tf.tile(query_vec, [multiplier, 1, 1])
    paddings = tf.zeros([2 * multiplier, 5])
    p = attention.TransformerLayer.Params()
    p.name = 'transformer_layer'
    p.input_dim = 4
    p.tr_fflayer_tpl.hidden_dim = 7
    p.tr_atten_tpl.num_heads = 2
    p.params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=0)
    l = p.Instantiate()
    ctx_vec, _ = l.FProp(l.theta, query_vec, paddings, aux_vec, aux_paddings)
    tf.global_variables_initializer().run()
    actual_ctx = sess.run(ctx_vec)
    actual_ctx = np.reshape(actual_ctx, (10 * multiplier, 4))
    tf.logging.info(np.array_repr(actual_ctx))
    expected_ctx = [
        4.7839108, 4.5303655, 5.5551023, 5.065767, 5.0493064, 3.2142467,
        2.8200178, 5.659971, 4.3814187, 2.60475
    ] * multiplier
    self.assertAllClose(expected_ctx, np.sum(actual_ctx, axis=1))
Example #16
Source File: batch_major_attention_test.py From lingvo with Apache License 2.0
def testTransformerAttentionLayerFPropCrossAttention(self):
  with self.session(use_gpu=True) as sess:
    (query_vec, _, aux_vec,
     aux_paddings) = self._TransformerAttentionLayerInputs()
    p = attention.TransformerAttentionLayer.Params().Set(
        name='transformer_cross_atten',
        input_dim=4,
        is_masked=False,
        num_heads=2)
    p.params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=0)
    l = p.Instantiate()
    ctx_vec, _ = l.FProp(l.theta, query_vec, aux_vec, aux_paddings)
    tf.global_variables_initializer().run()
    actual_ctx = sess.run(ctx_vec)
    actual_ctx = np.reshape(actual_ctx, (10, 4))
    tf.logging.info(np.array_repr(actual_ctx))
    expected_ctx = [19.345360, 15.057412, 13.744134, 13.387347]
    self.assertAllClose(expected_ctx, np.sum(actual_ctx, axis=0))
Example #17
Source File: batch_major_attention_test.py From lingvo with Apache License 2.0
def testTransformerDecoderLayerStackFProp(self):
  with self.session(use_gpu=True) as sess:
    (query_vec, paddings, aux_vec,
     aux_paddings) = self._TransformerAttentionLayerInputs()
    l = self._ConstructTransformerDecoderLayerStack()
    layer_output, _ = l.FProp(
        l.theta,
        query_vec=query_vec,
        paddings=paddings,
        aux_vec=aux_vec,
        aux_paddings=aux_paddings)
    tf.global_variables_initializer().run()
    actual_layer_output = sess.run(layer_output)
    actual_layer_output = np.reshape(actual_layer_output, (10, 4))
    tf.logging.info(np.array_repr(actual_layer_output))
    expected_layer_output = [9.926413, -4.491376, 27.051598, 2.112684]
    self.assertAllClose(expected_layer_output,
                        np.sum(actual_layer_output, axis=0))
Example #18
Source File: batch_major_attention_test.py From lingvo with Apache License 2.0
def testTransformerAttentionLayerFPropMaskedSelfAttention(self):
  with self.session(use_gpu=True) as sess:
    query_vec, paddings, _, _ = self._TransformerAttentionLayerInputs()
    p = attention.TransformerAttentionLayer.Params().Set(
        name='transformer_masked_self_atten',
        input_dim=4,
        is_masked=True,
        num_heads=2)
    p.params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=0)
    l = p.Instantiate()
    ctx_vec, _ = l.FProp(l.theta, query_vec, None, paddings)
    tf.global_variables_initializer().run()
    actual_ctx = sess.run(ctx_vec)
    actual_ctx = np.reshape(actual_ctx, (10, 4))
    tf.logging.info(np.array_repr(actual_ctx))
    expected_ctx = [7.777687, 5.219166, 6.305151, 4.817311]
    self.assertAllClose(expected_ctx, np.sum(actual_ctx, axis=0))
Example #19
Source File: beam_search_helper_test.py From lingvo with Apache License 2.0
def testBeamSearchHelperWithSeqLengths(self):
  with self.session(use_gpu=False) as sess:
    topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults(
        sess, num_hyps_per_beam=3, pass_seq_lengths=True)
    print(np.array_repr(topk_ids))
    print(np.array_repr(topk_lens))
    print(np.array_repr(topk_scores))
    expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0],
                         [4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2],
                         [6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]]
    expected_topk_lens = [5, 4, 4, 7, 6, 6]
    expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776],
                            [9.74691486, 8.46679497, 7.14809656]]
    self.assertEqual(expected_topk_ids, topk_ids.tolist())
    self.assertEqual(expected_topk_lens, topk_lens.tolist())
    self.assertAllClose(expected_topk_scores, topk_scores)
Example #20
Source File: attention_test.py From lingvo with Apache License 2.0
def testPerStepSourcePaddingMultiHeadedAttention(self):
  params = attention.MultiHeadedAttention.Params()
  params.name = 'atten'
  params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)
  depth = 6
  params.source_dim = depth
  params.query_dim = depth
  params.hidden_dim = depth
  params.vn.global_vn = False
  params.vn.per_step_vn = False
  atten = params.Instantiate()
  prob_out, vec_out = self._testPerStepSourcePaddingHelper(atten, depth)
  print('vec_out', np.array_repr(np.sum(vec_out, 1)))
  self.assertAllClose([-0.006338, -0.025153, 0.041647, -0.025153],
                      np.sum(vec_out, 1))
  self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1))
Example #21
Source File: attention_test.py From lingvo with Apache License 2.0
def testPerStepSourcePaddingLocationSensitiveAttention(self):
  params = attention.LocationSensitiveAttention.Params()
  params.name = 'atten'
  params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)
  depth = 6
  params.source_dim = depth
  params.query_dim = depth
  params.hidden_dim = depth
  params.location_filter_size = 3
  params.location_num_filters = 4
  params.vn.global_vn = False
  params.vn.per_step_vn = False
  atten_state = tf.concat(
      [tf.ones([4, 1], tf.float32),
       tf.zeros([4, 5], tf.float32)], 1)
  atten_state = tf.expand_dims(atten_state, 1)
  atten = params.Instantiate()
  prob_out, vec_out = self._testPerStepSourcePaddingHelper(
      atten, depth, atten_state=atten_state)
  print('vec_out', np.array_repr(np.sum(vec_out, 1)))
  self.assertAllClose([2.001103, 3.293414, 2.306448, 3.293414],
                      np.sum(vec_out, 1))
  self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1))
Example #22
Source File: attention_test.py From lingvo with Apache License 2.0
def testPerStepSourcePaddingMonotonicAttention(self):
  params = attention.MonotonicAttention.Params()
  params.name = 'atten'
  params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)
  depth = 6
  params.source_dim = depth
  params.query_dim = depth
  params.hidden_dim = depth
  params.vn.global_vn = False
  params.vn.per_step_vn = False
  atten = params.Instantiate()
  atten_state = atten.ZeroAttentionState(6, 4)
  atten_state.emit_probs = tf.concat(
      [tf.ones([4, 1], tf.float32),
       tf.zeros([4, 5], tf.float32)], 1)
  prob_out, vec_out = self._testPerStepSourcePaddingHelper(
      atten, depth, atten_state=atten_state)
  print('prob_out', np.array_repr(np.sum(prob_out, 1)))
  print('vec_out', np.array_repr(np.sum(vec_out, 1)))
Example #23
Source File: rnn_cell_test.py From lingvo with Apache License 2.0
def testLSTMSimpleWithForgetGateInitBias(self, couple_input_forget_gates,
                                         b_expected):
  params = rnn_cell.LSTMCellSimple.Params().Set(
      name='lstm',
      params_init=py_utils.WeightInit.Constant(0.1),
      couple_input_forget_gates=couple_input_forget_gates,
      num_input_nodes=2,
      num_output_nodes=3,
      forget_gate_bias=2.0,
      bias_init=py_utils.WeightInit.Constant(0.1),
      dtype=tf.float64)
  lstm = rnn_cell.LSTMCellSimple(params)
  np.random.seed(_NUMPY_RANDOM_SEED)
  with self.session(use_gpu=False):
    self.evaluate(tf.global_variables_initializer())
    b_value = lstm._GetBias(lstm.theta).eval()
    tf.logging.info('testLSTMSimpleWithForgetGateInitBias b = %s',
                    np.array_repr(b_value))
    self.assertAllClose(b_value, b_expected)

# pyformat: disable
Example #24
Source File: layers_test.py From lingvo with Apache License 2.0
def testConv2DLayerFProp(self):
  # pyformat: disable
  # pylint: disable=bad-whitespace
  expected_output1 = [
      [[[ 0.36669245, 0.91488785],
        [ 0.07532132, 0.        ]],
       [[ 0.34952009, 0.        ],
        [ 1.91783941, 0.        ]]],
      [[[ 0.28304493, 0.        ],
        [ 0.        , 0.        ]],
       [[ 0.        , 0.86575812],
        [ 0.        , 1.60203481]]]]
  # pyformat: enable
  # pylint: enable=bad-whitespace
  actual = self._evalConvLayerFProp()
  print('actual = ', np.array_repr(actual))
  self.assertAllClose(expected_output1, actual)
Example #25
Source File: attention_test.py From lingvo with Apache License 2.0
def testPerStepSourcePaddingDotProductAttention(self):
  params = attention.DotProductAttention.Params()
  params.name = 'atten'
  depth = 6
  params.source_dim = depth
  params.query_dim = depth
  params.hidden_dim = depth
  params.vn.global_vn = False
  params.vn.per_step_vn = False
  atten = params.Instantiate()
  prob_out, vec_out = self._testPerStepSourcePaddingHelper(atten, depth)
  print('vec_out', np.array_repr(np.sum(vec_out, 1)))
  self.assertAllClose([2.02671742, 3.38590097, 2.34964013, 3.38590097],
                      np.sum(vec_out, 1))
  self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1))
Example #26
Source File: batch_major_attention_test.py From lingvo with Apache License 2.0
def testTransformerEncoderLayerStackFProp(self):
  with self.session(use_gpu=True) as sess:
    (query_vec, paddings, _, _) = self._TransformerAttentionLayerInputs()
    l = self._ConstructTransformerEncoderLayerStack()
    layer_output, _ = l.FProp(
        l.theta, query_vec=query_vec, paddings=paddings)
    tf.global_variables_initializer().run()
    actual_layer_output = sess.run(layer_output)
    actual_layer_output = np.reshape(actual_layer_output, (10, 4))
    tf.logging.info(np.array_repr(actual_layer_output))
    expected_layer_output = [6.178955, -11.376661, 7.032681, -1.532627]
    self.assertAllClose(expected_layer_output,
                        np.sum(actual_layer_output, axis=0))
Example #27
Source File: layers_test.py From lingvo with Apache License 2.0
def testProjectionLayerExplicitFoldingNoBatchNorm(self):
  unfolded = self._evalProjectionLayer(
      batch_norm=False, bn_fold_weights=False, expect_bn_fold_weights=False)
  # Note that weight folding will report as disabled because batch norm is
  # disabled.
  folded = self._evalProjectionLayer(
      batch_norm=False, bn_fold_weights=True, expect_bn_fold_weights=False)
  tf.logging.info('unfolded = %s', np.array_repr(unfolded))
  tf.logging.info('folded = %s', np.array_repr(folded))
  self.assertAllClose(folded, unfolded)
Example #28
Source File: layers_test.py From lingvo with Apache License 2.0
def testProjectionLayerExplicitFoldingEval(self):
  unfolded = self._evalProjectionLayer(
      bn_fold_weights=False, expect_bn_fold_weights=False, is_eval=True)
  folded = self._evalProjectionLayer(
      bn_fold_weights=True, expect_bn_fold_weights=True, is_eval=True)
  tf.logging.info('unfolded = %s', np.array_repr(unfolded))
  tf.logging.info('folded = %s', np.array_repr(folded))
  self.assertAllClose(folded, unfolded)
Example #29
Source File: layers_test.py From lingvo with Apache License 2.0
def testProjectionLayerExplicitFolding(self):
  unfolded = self._evalProjectionLayer(
      bn_fold_weights=False, expect_bn_fold_weights=False)
  folded = self._evalProjectionLayer(
      bn_fold_weights=True, expect_bn_fold_weights=True)
  tf.logging.info('unfolded = %s', np.array_repr(unfolded))
  tf.logging.info('folded = %s', np.array_repr(folded))
  self.assertAllClose(folded, unfolded)
Example #30
Source File: py_utils.py From lingvo with Apache License 2.0
def _Print(name, x):
  with _PrintOptions(linewidth=1000):
    tf.logging.info('%s = %s', name, np.array_repr(x))
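Note that _PrintOptions above is a lingvo-internal helper. In plain numpy code, the built-in np.printoptions context manager (available since numpy 1.15) serves the same purpose, as in this minimal sketch:

import numpy as np

x = np.arange(12, dtype=np.float32)

# Temporarily widen the line width so array_repr emits one long line
# instead of wrapping; the global print settings are restored when the
# block exits.
with np.printoptions(linewidth=1000, precision=3):
    print(np.array_repr(x))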