Python tensorflow.compat.v1.placeholder() Examples
The following are 30 code examples of tensorflow.compat.v1.placeholder(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the module tensorflow.compat.v1.
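Before the examples, here is a minimal sketch (not taken from any of the projects below) of how a placeholder is typically used: it declares a graph input with a dtype and an optional shape, and its value is supplied through feed_dict when the session runs.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # needed when running the v1 API under TF 2.x

# Declare a graph input; None leaves the batch dimension unspecified.
x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
y = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
  # The placeholder's value is supplied at run time via feed_dict.
  print(sess.run(y, feed_dict={x: np.ones((2, 3), np.float32)}))  # [3. 3.]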
Example #1
Source File: run_squad_v1.py from albert, Apache License 2.0
def build_squad_serving_input_fn(seq_length):
  """Builds a serving input fn for raw input."""

  def _seq_serving_input_fn():
    """Serving input fn for raw inputs."""
    input_ids = tf.placeholder(
        shape=[1, seq_length], name="input_ids", dtype=tf.int32)
    input_mask = tf.placeholder(
        shape=[1, seq_length], name="input_mask", dtype=tf.int32)
    segment_ids = tf.placeholder(
        shape=[1, seq_length], name="segment_ids", dtype=tf.int32)
    inputs = {
        "input_ids": input_ids,
        "input_mask": input_mask,
        "segment_ids": segment_ids
    }
    return tf.estimator.export.ServingInputReceiver(
        features=inputs, receiver_tensors=inputs)

  return _seq_serving_input_fn
Example #2
Source File: lib_tfsampling.py from magenta, Apache License 2.0
def get_placeholders(self):
  hparams = self.hparams
  return dict(
      pianorolls=tf.placeholder(
          tf.bool,
          [None, None, hparams.num_pitches, hparams.num_instruments],
          "pianorolls"),
      # The default value is only used for checking if the completion masker
      # should be invoked.  It can't be used directly as the batch size and
      # length of pianorolls are unknown at graph-construction time.
      outer_masks=tf.placeholder_with_default(
          np.zeros(
              (1, 1, hparams.num_pitches, hparams.num_instruments),
              dtype=np.float32),
          [None, None, hparams.num_pitches, hparams.num_instruments],
          "outer_masks"),
      sample_steps=tf.placeholder_with_default(0, (), "sample_steps"),
      total_gibbs_steps=tf.placeholder_with_default(
          0, (), "total_gibbs_steps"),
      current_step=tf.placeholder_with_default(0, (), "current_step"),
      temperature=tf.placeholder_with_default(0.99, (), "temperature"))
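Example #2 leans on tf.placeholder_with_default, which behaves like a placeholder but falls back to a given value when nothing is fed. A minimal sketch of the difference (my own illustration, not from magenta):

import tensorflow.compat.v1 as tf

temperature = tf.placeholder_with_default(0.99, shape=(), name="temperature")
doubled = temperature * 2.0

with tf.Session() as sess:
  print(sess.run(doubled))                                # 1.98, default used
  print(sess.run(doubled, feed_dict={temperature: 0.5}))  # 1.0, fed value wins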
Example #3
Source File: rnn_test.py from magenta, Apache License 2.0
def testNoneDimsWithDynamicRNN(self):
  with self.session(use_gpu=True, graph=tf.Graph()) as sess:
    batch_size = 4
    num_steps = 5
    input_dim = 6
    cell_size = 7

    cell = contrib_rnn.LSTMBlockCell(cell_size)
    x = tf.placeholder(tf.float32, shape=(None, None, input_dim))

    output, _ = tf.nn.dynamic_rnn(
        cell, x, time_major=True, dtype=tf.float32)
    sess.run(tf.global_variables_initializer())

    feed = {}
    feed[x] = np.random.randn(num_steps, batch_size, input_dim)
    sess.run(output, feed)
Example #4
Source File: relative_bounds_test.py from interval-bound-propagation, Apache License 2.0
def test_linear_bounds_shape(self, dtype):
  batch_size = 11
  input_size = 7
  output_size = 5

  w = tf.placeholder(dtype=dtype, shape=(input_size, output_size))
  b = tf.placeholder(dtype=dtype, shape=(output_size,))
  lb_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
  ub_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
  nominal = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))

  bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
  bounds_out = bounds_in.apply_linear(None, w, b)
  lb_out, ub_out = bounds_out.lower, bounds_out.upper

  self.assertEqual(dtype, lb_out.dtype)
  self.assertEqual(dtype, ub_out.dtype)
  self.assertEqual((batch_size, output_size), lb_out.shape)
  self.assertEqual((batch_size, output_size), ub_out.shape)
Example #5
Source File: model_tf1.py from machine-learning-for-programming-samples, MIT License
def build(self, input_shape):
    with self._sess.graph.as_default():
        self._placeholders["tokens"] = tf.placeholder(
            dtype=tf.int32, shape=[None, None], name="tokens"
        )

        self._ops["output_logits"] = self.compute_logits(
            self._placeholders["tokens"]
        )
        self._ops["output_probs"] = tf.nn.softmax(self._ops["output_logits"], -1)
        result = self.compute_loss_and_acc(
            rnn_output_logits=self._ops["output_logits"],
            target_token_seq=self._placeholders["tokens"],
        )
        self._ops["loss"] = result.token_ce_loss
        self._ops["num_tokens"] = result.num_predictions
        self._ops["num_correct_tokens"] = result.num_correct_token_predictions
        self._ops["train_step"] = self._make_training_step(self._ops["loss"])

        init_op = tf.variables_initializer(
            self._sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        )
        self._sess.run(init_op)
Example #6
Source File: simplex_bounds_test.py from interval-bound-propagation, Apache License 2.0
def test_linear_simplex_bounds_shape(self, dtype):
  vocab_size = 103
  batch_size = 11
  input_size = 7
  output_size = 5

  w = tf.placeholder(dtype=dtype, shape=(input_size, output_size))
  b = tf.placeholder(dtype=dtype, shape=(output_size,))
  embedding = tf.placeholder(dtype=dtype, shape=(vocab_size, input_size))
  centres = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
  r = .2

  bounds_in = ibp.SimplexBounds(embedding, centres, r)
  bounds_out = bounds_in.apply_linear(None, w, b)
  lb_out, ub_out = bounds_out.lower, bounds_out.upper

  self.assertEqual(dtype, lb_out.dtype)
  self.assertEqual(dtype, ub_out.dtype)
  self.assertEqual((batch_size, output_size), lb_out.shape)
  self.assertEqual((batch_size, output_size), ub_out.shape)
Example #7
Source File: robust_model.py from interval-bound-propagation, Apache License 2.0
def add_representer(self, vocab_filename, padded_token=None):
  """Add a sentence representer to the computation graph.

  Args:
    vocab_filename: the name of the vocabulary file.
    padded_token: token used to pad the vocabulary.
  """
  self.embed_pad = utils.EmbedAndPad(
      self.batch_size,
      [self._lines_from_file(vocab_filename)],
      embedding_dim=self.embedding_dim,
      num_oov_buckets=self.num_oov_buckets,
      fine_tune_embeddings=self.fine_tune_embeddings,
      padded_token=padded_token)
  self.keep_prob = tf.placeholder(tf.float32, shape=None, name='keep_prob')

  # Model to get a sentence representation from embeddings.
  self.sentence_representer = models.SentenceRepresenterConv(
      self.config, keep_prob=self.keep_prob, pooling=self.pooling)
Example #8
Source File: player_utils.py from tensor2tensor, Apache License 2.0
def __init__(self, hparams, action_space, observation_space, policy_dir):
  assert hparams.base_algo == "ppo"
  ppo_hparams = trainer_lib.create_hparams(hparams.base_algo_params)

  frame_stack_shape = (1, hparams.frame_stack_size) + observation_space.shape
  self._frame_stack = np.zeros(frame_stack_shape, dtype=np.uint8)

  with tf.Graph().as_default():
    self.obs_t = tf.placeholder(shape=self.frame_stack_shape, dtype=np.uint8)
    self.logits_t, self.value_function_t = get_policy(
        self.obs_t, ppo_hparams, action_space
    )
    model_saver = tf.train.Saver(
        tf.global_variables(scope=ppo_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self.sess)
Example #9
Source File: rl_utils.py from tensor2tensor, Apache License 2.0
def __init__(
    self, batch_size, observation_space, action_space, policy_hparams,
    policy_dir, sampling_temp
):
  super(PolicyAgent, self).__init__(
      batch_size, observation_space, action_space
  )
  self._sampling_temp = sampling_temp
  with tf.Graph().as_default():
    self._observations_t = tf.placeholder(
        shape=((batch_size,) + self.observation_space.shape),
        dtype=self.observation_space.dtype
    )
    (logits, self._values_t) = rl.get_policy(
        self._observations_t, policy_hparams, self.action_space
    )
    actions = common_layers.sample_with_temperature(logits, sampling_temp)
    self._probs_t = tf.nn.softmax(logits / sampling_temp)
    self._actions_t = tf.cast(actions, tf.int32)
    model_saver = tf.train.Saver(
        tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess)
Example #10
Source File: video_metrics.py from tensor2tensor, Apache License 2.0
def get_zipped_dataset_from_predictions(predictions):
  """Creates dataset from in-memory predictions."""
  targets = stack_data_given_key(predictions, "targets")
  outputs = stack_data_given_key(predictions, "outputs")
  num_videos, num_steps = targets.shape[:2]

  # Truncate output time-steps to match target time-steps.
  outputs = outputs[:, :num_steps]

  targets_placeholder = tf.placeholder(targets.dtype, targets.shape)
  outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape)

  dataset = tf.data.Dataset.from_tensor_slices(
      (targets_placeholder, outputs_placeholder))
  iterator = dataset.make_initializable_iterator()
  feed_dict = {targets_placeholder: targets,
               outputs_placeholder: outputs}
  return iterator, feed_dict, num_videos
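Example #10 returns the iterator together with a feed_dict instead of running the session itself; routing the arrays through placeholders keeps them out of the serialized graph, where tf.data would otherwise embed them as constants. A hedged sketch of how a caller might consume such a result (variable names are illustrative, not from tensor2tensor):

import numpy as np
import tensorflow.compat.v1 as tf

targets = np.zeros((4, 10), np.float32)
outputs = np.ones((4, 10), np.float32)

targets_ph = tf.placeholder(targets.dtype, targets.shape)
outputs_ph = tf.placeholder(outputs.dtype, outputs.shape)
dataset = tf.data.Dataset.from_tensor_slices((targets_ph, outputs_ph))
iterator = dataset.make_initializable_iterator()
next_pair = iterator.get_next()

with tf.Session() as sess:
  # The placeholders are fed exactly once, when the iterator is initialized.
  sess.run(iterator.initializer,
           feed_dict={targets_ph: targets, outputs_ph: outputs})
  first_target, first_output = sess.run(next_pair)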
Example #11
Source File: data_test.py from magenta, Apache License 2.0
def testTfUnslicedKeyConditioned(self):
  converter = self.converter_class(
      steps_per_quarter=1, slice_bars=None, condition_on_key=True)
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, control_tensors_, lengths_ = (
        data.convert_to_tensors_op(sequence, converter))
    input_tensors, output_tensors, control_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, control_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})
  actual_input_tensors = [t[:l] for t, l in zip(input_tensors, lengths)]
  actual_unsliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
  actual_unsliced_key_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(control_tensors, lengths)]
  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_unsliced_labels, converter),
      actual_input_tensors)
  self.assertArraySetsEqual(
      self.expected_unsliced_labels, actual_unsliced_labels)
  self.assertArraySetsEqual(
      self.expected_unsliced_key_labels, actual_unsliced_key_labels)
Example #12
Source File: data_test.py from magenta, Apache License 2.0
def testTfSlicedChordConditioned(self):
  converter = self.converter_class(
      steps_per_quarter=1,
      slice_bars=2,
      max_tensors_per_notesequence=None,
      chord_encoding=note_seq.MajorMinorChordOneHotEncoding())
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, control_tensors_, lengths_ = (
        data.convert_to_tensors_op(sequence, converter))
    input_tensors, output_tensors, control_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, control_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})
  actual_sliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
  actual_sliced_chord_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(control_tensors, lengths)]
  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_sliced_labels, converter),
      input_tensors)
  self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)
  self.assertArraySetsEqual(
      self.expected_sliced_chord_labels, actual_sliced_chord_labels)
Example #13
Source File: data_test.py from magenta, Apache License 2.0
def testTfSliced(self):
  converter = self.converter_class(
      steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=None)
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, _, lengths_ = data.convert_to_tensors_op(
        sequence, converter)
    input_tensors, output_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})
  actual_sliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_sliced_labels, converter),
      input_tensors)
  self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)
Example #14
Source File: simulated_batch_gym_env.py from tensor2tensor, Apache License 2.0
def __init__(self, *args, **kwargs):
  with tf.Graph().as_default():
    self._batch_env = SimulatedBatchEnv(*args, **kwargs)

    self._actions_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._rewards_t, self._dones_t = self._batch_env.simulate(self._actions_t)
    with tf.control_dependencies([self._rewards_t]):
      self._obs_t = self._batch_env.observ
    self._indices_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._reset_op = self._batch_env.reset(
        tf.range(self.batch_size, dtype=tf.int32)
    )

    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    self._batch_env.initialize(self._sess)
Example #15
Source File: evaluator.py from graphics, Apache License 2.0
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.unet = g2g.UNet3D(in_grid_res=self.in_grid_res,
                           out_grid_res=self.out_grid_res,
                           num_filters=self.num_filters,
                           max_filters=self.max_filters,
                           out_features=self.out_features)
    self.input_grid_ph = tf.placeholder(
        tf.float32, [None, None, None])
    self.input_grid = self.input_grid_ph[tf.newaxis, ..., tf.newaxis]
    self.feat_grid = self.unet(self.input_grid)
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)
Example #16
Source File: evaluator.py from graphics, Apache License 2.0
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.encoder = g2v.GridEncoder(
        in_grid_res=self.in_grid_res,
        num_filters=self.num_filters,
        codelen=self.codelen,
        name='g2v')
    self.global_step = tf.get_variable(
        'global_step', shape=[], dtype=tf.int64)
    self.grid_ph = tf.placeholder(
        tf.float32, shape=[self.gres, self.gres, self.gres])
    self.start_ph = tf.placeholder(tf.int32, shape=[self.grid_batch, 3])
    self.ingrid = self._batch_slice(self.grid_ph, self.start_ph,
                                    self.in_grid_res, self.grid_batch)
    self.ingrid = self.ingrid[..., tf.newaxis]
    self.lats = self.encoder(self.ingrid, training=False)  # [gb, codelen]
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)
Example #17
Source File: evaluator.py from graphics, Apache License 2.0
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.encoder = g2v.GridEncoder(in_grid_res=self.in_grid_res,
                                   num_filters=self.encoder_nf,
                                   codelen=self.codelen,
                                   name='g2v')
    self.grid_ph = tf.placeholder(
        tf.float32,
        shape=[None, self.in_grid_res, self.in_grid_res, self.in_grid_res, 1])
    self.lats = self.encoder(self.grid_ph, training=False)  # [gb, codelen]
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)
Example #18
Source File: evaluator.py from graphics, Apache License 2.0
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.refiner = im.ImNet(dim=self.dim,
                            in_features=self.codelen,
                            out_features=self.out_features,
                            num_filters=self.num_filters)
    self.global_step = tf.get_variable('global_step', shape=[],
                                       dtype=tf.int64)
    self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
    self.lat_ph = tf.placeholder(tf.float32, shape=[self.codelen])

    lat = tf.broadcast_to(self.lat_ph[tf.newaxis],
                          [self.point_batch, self.codelen])
    code = tf.concat((self.pts_ph, lat), axis=-1)  # [pb, 3+c]
    vals = self.refiner(code, training=False)  # [pb, 1]
    self.vals = tf.squeeze(vals, axis=1)  # [pb]
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)
Example #19
Source File: run_classifier.py from albert, Apache License 2.0
def serving_input_receiver_fn():
  """Creates an input function for serving."""
  seq_len = FLAGS.max_seq_length
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  features = {
      "input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "input_mask": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
  }
  feature_map = tf.parse_example(serialized_example, features=features)
  feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
  feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)

  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in feature_map.keys():
    t = feature_map[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    feature_map[name] = t

  return tf.estimator.export.ServingInputReceiver(
      features=feature_map, receiver_tensors=serialized_example)
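Example #19 accepts batches of serialized tf.Example protos through a single string placeholder, so the exported SavedModel can be called with raw protos. For reference, a serialized example matching that feature spec could be built as follows (a sketch; the helper name is my own, not from albert):

import tensorflow.compat.v1 as tf

def make_serialized_example(input_ids, input_mask, segment_ids):
  """Packs three int64 feature lists into a serialized tf.Example proto."""
  def int64_list(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
  example = tf.train.Example(features=tf.train.Features(feature={
      "input_ids": int64_list(input_ids),
      "input_mask": int64_list(input_mask),
      "segment_ids": int64_list(segment_ids),
  }))
  return example.SerializeToString()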
Example #20
Source File: build_imagenet_data.py from morph-net, Apache License 2.0
def __init__(self):
  # Create a single Session to run all image coding calls.
  self._sess = tf.Session()

  # Initializes function that converts PNG to JPEG data.
  self._png_data = tf.placeholder(dtype=tf.string)
  image = tf.image.decode_png(self._png_data, channels=3)
  self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

  # Initializes function that converts CMYK JPEG data to RGB JPEG data.
  self._cmyk_data = tf.placeholder(dtype=tf.string)
  image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
  self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)

  # Initializes function that decodes RGB JPEG data.
  self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
  self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
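The constructor above builds each decode/encode op once so a single Session can serve every image. The class presumably exposes small wrapper methods around sess.run; a sketch of one such method (hypothetical, not copied from build_imagenet_data.py):

def decode_jpeg(self, image_data):
  # Feed raw JPEG bytes into the pre-built decoding op.
  image = self._sess.run(self._decode_jpeg,
                         feed_dict={self._decode_jpeg_data: image_data})
  assert len(image.shape) == 3 and image.shape[2] == 3
  return image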
Example #21
Source File: data_test.py from magenta, Apache License 2.0
def _ValidateProvideBatchPlaceholder(self, truncated_length, batch_size,
                                     lengths, expected_num_inputs):
  examples, expected_inputs = self._CreateExamplesAndExpectedInputs(
      truncated_length, lengths, expected_num_inputs)

  examples_ph = tf.placeholder(tf.string, [None])
  feed_dict = {examples_ph: [e.SerializeToString() for e in examples]}
  self._ValidateProvideBatch(
      examples_ph,
      truncated_length,
      batch_size,
      expected_inputs,
      feed_dict=feed_dict)
Example #22
Source File: data_test.py from magenta, Apache License 2.0
def testTfUnsliced(self):
  converter = self.converter_class(steps_per_quarter=1, slice_bars=None)
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, _, lengths_ = data.convert_to_tensors_op(
        sequence, converter)
    input_tensors, output_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})
  actual_input_tensors = [t[:l] for t, l in zip(input_tensors, lengths)]
  actual_unsliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_unsliced_labels, converter),
      actual_input_tensors)
  self.assertArraySetsEqual(
      self.expected_unsliced_labels, actual_unsliced_labels)
Example #23
Source File: gym_env.py from tensor2tensor, Apache License 2.0
def __init__(self, batch_size, *args, **kwargs):
  self._store_rollouts = kwargs.pop("store_rollouts", True)

  super(T2TEnv, self).__init__(*args, **kwargs)

  self.batch_size = batch_size
  self._rollouts_by_epoch_and_split = collections.OrderedDict()
  self.current_epoch = None
  self._should_preprocess_on_reset = True
  with tf.Graph().as_default() as tf_graph:
    self._tf_graph = _Noncopyable(tf_graph)
    self._decoded_image_p = _Noncopyable(
        tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
    )
    self._encoded_image_t = _Noncopyable(
        tf.image.encode_png(self._decoded_image_p.obj)
    )
    self._encoded_image_p = _Noncopyable(tf.placeholder(tf.string))
    self._decoded_image_t = _Noncopyable(
        tf.image.decode_png(self._encoded_image_p.obj)
    )
    self._session = _Noncopyable(tf.Session())
Example #24
Source File: data_test.py from magenta, Apache License 2.0
def testTfUnslicedChordConditioned(self):
  converter = self.converter_class(
      steps_per_quarter=1,
      slice_bars=None,
      chord_encoding=note_seq.MajorMinorChordOneHotEncoding())
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, control_tensors_, lengths_ = (
        data.convert_to_tensors_op(sequence, converter))
    input_tensors, output_tensors, control_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, control_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})
  actual_input_tensors = [t[:l] for t, l in zip(input_tensors, lengths)]
  actual_unsliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
  actual_unsliced_chord_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(control_tensors, lengths)]
  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_unsliced_labels, converter),
      actual_input_tensors)
  self.assertArraySetsEqual(
      self.expected_unsliced_labels, actual_unsliced_labels)
  self.assertArraySetsEqual(
      self.expected_unsliced_chord_labels, actual_unsliced_chord_labels)
Example #25
Source File: common_joint.py from magenta, Apache License 2.0
def get_summary(self, sess, key, value):
  """Get a TF (scalar) summary.

  Args:
    sess: A TF Session to be used in making the summary.
    key: A string indicating the name of the summary.
    value: The scalar value of the summary.

  Returns:
    A TF summary.
  """
  self._add_key_if_not_exists(key)
  # The (placeholder, summary) pair is created lazily by
  # _add_key_if_not_exists (see Example #27 below).
  placeholder, summary = self._key_to_ph_summary_tuple[key]
  return sess.run(summary, {placeholder: value})
Example #26
Source File: tensorspec_utils.py from tensor2robot, Apache License 2.0
def map_feed_dict_unsafe(feature_placeholders_spec, np_inputs_spec):
  """Deprecated function to create a feed_dict to be passed to session.run.

  tensorspec_utils.map_feed_dict should be used instead. map_feed_dict_unsafe
  does not check that there is actually any agreement between
  feature_placeholders_spec or np_inputs_spec in terms of dtype, shape
  or additional unused attributes within np_inputs_spec.

  Args:
    feature_placeholders_spec: A TensorSpecStruct containing
      {str: tf.placeholder}.
    np_inputs_spec: The numpy input according to the same spec.

  Returns:
    A mapping {placeholder: np.ndarray} which can be fed to a tensorflow
    session.run.
  """
  logging.warning('map_feed_dict_unsafe is deprecated. '
                  'Please update to map_feed_dict.')
  flat_spec = flatten_spec_structure(feature_placeholders_spec)
  flat_np_inputs = flatten_spec_structure(np_inputs_spec)
  for key, value in flat_np_inputs.items():
    if key not in flat_spec:
      logging.warn(
          'np_inputs has an input: %s, not found in the tensorspec.', key)
  feed_dict = {}
  for key, value in flat_spec.items():
    feed_dict[value] = flat_np_inputs[key]
  return feed_dict
Example #27
Source File: common_joint.py from magenta, Apache License 2.0
def _add_key_if_not_exists(self, key):
  """Adds the related TF heads for a key if it has not been used before."""
  if key in self._key_to_ph_summary_tuple:
    return
  placeholder = tf.placeholder(tf.float32, shape=(), name=key + '_ph')
  summary = tf.summary.scalar(key, placeholder)
  self._key_to_ph_summary_tuple[key] = (placeholder, summary)
Example #28
Source File: tensorspec_utils_test.py from tensor2robot, Apache License 2.0
def test_from_placeholder(self):
  unknown = tf.placeholder(tf.int64, name='unknown')
  partial = tf.placeholder(tf.float32, shape=[None, 1], name='partial')

  spec_1 = utils.ExtendedTensorSpec.from_tensor(unknown)
  self.assertEqual(spec_1.dtype, tf.int64)
  self.assertEqual(spec_1.shape, None)
  self.assertEqual(spec_1.name, 'unknown')

  spec_2 = utils.ExtendedTensorSpec.from_tensor(partial)
  self.assertEqual(spec_2.dtype, tf.float32)
  self.assertEqual(spec_2.shape.as_list(), [None, 1])
  self.assertEqual(spec_2.name, 'partial')
Example #29
Source File: export_checkpoints.py from albert, Apache License 2.0
def build_model(sess):
  """Module function."""
  input_ids = tf.placeholder(tf.int32, [None, None], "input_ids")
  input_mask = tf.placeholder(tf.int32, [None, None], "input_mask")
  segment_ids = tf.placeholder(tf.int32, [None, None], "segment_ids")
  mlm_positions = tf.placeholder(tf.int32, [None, None], "mlm_positions")

  albert_config_path = os.path.join(
      FLAGS.albert_directory, "albert_config.json")
  albert_config = modeling.AlbertConfig.from_json_file(albert_config_path)

  model = modeling.AlbertModel(
      config=albert_config,
      is_training=False,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=False)

  get_mlm_logits(model.get_sequence_output(), albert_config,
                 mlm_positions, model.get_embedding_table())
  get_sentence_order_logits(model.get_pooled_output(), albert_config)

  checkpoint_path = os.path.join(FLAGS.albert_directory, FLAGS.checkpoint_name)
  tvars = tf.trainable_variables()
  (assignment_map, initialized_variable_names
  ) = modeling.get_assignment_map_from_checkpoint(tvars, checkpoint_path)

  tf.logging.info("**** Trainable Variables ****")
  for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
      init_string = ", *INIT_FROM_CKPT*"
    tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                    init_string)

  tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
  init = tf.global_variables_initializer()
  sess.run(init)
  return sess
Example #30
Source File: tensorspec_utils_test.py from tensor2robot, Apache License 2.0
def test_shape_compatibility(self):
  unknown = tf.placeholder(tf.int64)
  partial = tf.placeholder(tf.int64, shape=[None, 1])
  full = tf.placeholder(tf.int64, shape=[2, 3])
  rank3 = tf.placeholder(tf.int64, shape=[4, 5, 6])

  desc_unknown = utils.ExtendedTensorSpec(None, tf.int64)
  self.assertTrue(desc_unknown.is_compatible_with(unknown))
  self.assertTrue(desc_unknown.is_compatible_with(partial))
  self.assertTrue(desc_unknown.is_compatible_with(full))
  self.assertTrue(desc_unknown.is_compatible_with(rank3))

  desc_partial = utils.ExtendedTensorSpec([2, None], tf.int64)
  self.assertTrue(desc_partial.is_compatible_with(unknown))
  self.assertTrue(desc_partial.is_compatible_with(partial))
  self.assertTrue(desc_partial.is_compatible_with(full))
  self.assertFalse(desc_partial.is_compatible_with(rank3))

  desc_full = utils.ExtendedTensorSpec([2, 3], tf.int64)
  self.assertTrue(desc_full.is_compatible_with(unknown))
  self.assertFalse(desc_full.is_compatible_with(partial))
  self.assertTrue(desc_full.is_compatible_with(full))
  self.assertFalse(desc_full.is_compatible_with(rank3))

  desc_rank3 = utils.ExtendedTensorSpec([4, 5, 6], tf.int64)
  self.assertTrue(desc_rank3.is_compatible_with(unknown))
  self.assertFalse(desc_rank3.is_compatible_with(partial))
  self.assertFalse(desc_rank3.is_compatible_with(full))
  self.assertTrue(desc_rank3.is_compatible_with(rank3))