Python tensorflow.variable_axis_size_partitioner() Examples
The following are 4 code examples of tensorflow.variable_axis_size_partitioner(). Each example lists its source file and license above the code.
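For orientation before the examples: tf.variable_axis_size_partitioner(max_shard_bytes, ...) returns a partitioner callable that can be passed to tf.variable_scope or tf.get_variable; it splits a variable along one axis so that each shard stays at or below max_shard_bytes (optional axis and max_shards arguments appear in Example #3). The following is a minimal sketch of typical usage under the TF 1.x graph-mode API used throughout this page; the scope name "demo", the variable name "weights", and the sizes are illustrative, not taken from the examples.

import tensorflow as tf  # TensorFlow 1.x graph-mode API, as in the examples below

# A [32] float32 variable occupies 32 * 4 = 128 bytes; with
# max_shard_bytes=64 the partitioner should split it into 2 shards
# along axis 0 (16 elements each).
partitioner = tf.variable_axis_size_partitioner(max_shard_bytes=64)

with tf.variable_scope("demo", partitioner=partitioner):
  # Illustrative variable name and shape.
  weights = tf.get_variable(
      "weights", shape=[32], dtype=tf.float32,
      initializer=tf.zeros_initializer())

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Reading the partitioned variable as a tensor concatenates its shards.
  print(sess.run(tf.convert_to_tensor(weights)).shape)  # (32,)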
Example #1
Source File: partitioned_variables_test.py, from the deep_image_model project (Apache License 2.0)
def testControlDepsNone(self):
  with self.test_session() as session:
    c = tf.constant(1.0)
    with tf.control_dependencies([c]):
      # d gets the control dependency.
      d = tf.constant(2.0)
      # Partitioned variables do not.
      var_x = tf.get_variable(
          "x",
          initializer=tf.ones_initializer([2]),
          partitioner=tf.variable_axis_size_partitioner(4))

      ops_before_read = session.graph.get_operations()
      var_x.as_tensor()  # Caches the ops for subsequent reads.
      reading_ops = [op for op in session.graph.get_operations()
                     if op not in ops_before_read]

    self.assertEqual([c.op], d.op.control_inputs)
    # Tests that reading a partitioned variable, like reading a regular
    # variable, adds no control dependencies.
    for op in reading_ops:
      self.assertEqual([], op.control_inputs)
Example #2
Source File: partitioned_variables_test.py, from the deep_image_model project (Apache License 2.0)
def testConcat(self):
  with self.test_session() as session:
    var_x = tf.get_variable(
        "x",
        initializer=tf.constant([1., 2.]),
        partitioner=tf.variable_axis_size_partitioner(4))

    c = tf.constant(1.0)
    with tf.control_dependencies([c]):
      ops_before_concat = session.graph.get_operations()
      value = var_x._concat()  # pylint: disable=protected-access
      concat_ops = [op for op in session.graph.get_operations()
                    if op not in ops_before_concat]

    concat_control_inputs = [ci for op in concat_ops
                             for ci in op.control_inputs]
    self.assertTrue(
        c.op in concat_control_inputs,
        "var_x._concat() should get control dependencies from its scope.")
    tf.global_variables_initializer().run()
    self.assertAllClose(value.eval(), var_x.as_tensor().eval())
Example #3
Source File: partitioned_variables_test.py, from the deep_image_model project (Apache License 2.0)
def _testVariableAxisSizePartitioner(self, name, axis, max_shard_bytes,
                                     expected_axis_shards,
                                     expected_partitions, max_shards=None):
  partitioner = tf.variable_axis_size_partitioner(
      axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)

  with tf.variable_scope("root", partitioner=partitioner):
    v0 = tf.get_variable(name, dtype=tf.float32, shape=(4, 8, 16, 32))
    v0_list = v0._get_variable_list()
    v0_part = v0._get_partitions()
    self.assertEqual(len(v0_list), expected_axis_shards)
    self.assertAllEqual(v0_part, expected_partitions)
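The expected values passed to this helper follow from the partitioner's arithmetic: the (4, 8, 16, 32) float32 variable occupies 65536 bytes, and each slice along axis 0 is 8 * 16 * 32 * 4 = 16384 bytes. A hypothetical invocation (illustrative only, not taken from the original test file) would therefore look like:

# Illustrative call, not from the original test file: with
# max_shard_bytes=32768, two 16384-byte axis-0 slices fit per shard,
# so the 4 slices split into ceil(4 / 2) = 2 shards.
self._testVariableAxisSizePartitioner(
    "v0_illustrative", axis=0, max_shard_bytes=32768,
    expected_axis_shards=2, expected_partitions=(2, 1, 1, 1))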
Example #4
Source File: localhost_cluster_performance_test.py, from the deep_image_model project (Apache License 2.0)
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
  workers, _ = create_local_cluster(num_workers=1, num_ps=100)
  worker_sessions = [tf.Session(w.target) for w in workers]
  worker = worker_sessions[0]
  partition_sizes = (1, 512, 1024 * 32, 1024 * 128)

  partitioned = []
  for partition_size in partition_sizes:
    # max_shard_bytes is 4 * partition_size, so a variable of
    # 1000 * partition_size float32s should partition into 1000 shards,
    # each holding partition_size float32s.
    print("Building partitioned variable with %d floats per partition"
          % partition_size)
    with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
      partitioned_ix = tf.get_variable(
          "partitioned_%d" % partition_size,
          shape=[1000 * partition_size],
          dtype=tf.float32,
          # Each partition holds exactly partition_size float32s.
          partitioner=tf.variable_axis_size_partitioner(
              max_shard_bytes=4 * partition_size))
      # Concatenates along axis 0.
      partitioned.append(tf.convert_to_tensor(partitioned_ix))

  tf.global_variables_initializer().run(session=worker)

  for ix, partition_size in enumerate(partition_sizes):
    print("Running benchmark having partitions with %d floats"
          % partition_size)
    self.run_op_benchmark(
        worker,
        partitioned[ix],
        name=("read_concat_1000_partitions_from_"
              "100_parameter_servers_partsize_%d_floats" % partition_size))
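As a sanity check on the shard arithmetic in this benchmark, the partitioner returned by tf.variable_axis_size_partitioner can also be called directly with a shape and dtype. The sketch below is standalone and illustrative (not part of the original benchmark); it assumes the partitioner callable accepts a fully defined tf.TensorShape and a tf.DType, and it confirms that max_shard_bytes = 4 * partition_size yields 1000 shards for a 1-D float32 variable of 1000 * partition_size elements.

import tensorflow as tf  # TensorFlow 1.x, as in the benchmark above

partition_size = 512  # one of the benchmark's partition_sizes, chosen for illustration
partitioner = tf.variable_axis_size_partitioner(
    max_shard_bytes=4 * partition_size)

# Each axis-0 slice of a 1-D float32 variable is 4 bytes, so
# partition_size slices fit per shard and 1000 shards are needed.
partitions = partitioner(
    shape=tf.TensorShape([1000 * partition_size]), dtype=tf.float32)
print(partitions)  # expected: [1000]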