Python model_utils.get_train_op() Examples
The following are 6 code examples of model_utils.get_train_op().
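All of the examples share the same calling convention: the project's FLAGS and a scalar training loss are passed to model_utils.get_train_op(), which returns the training op together with the learning rate and gradient norm used for monitoring. The minimal sketch below distills that pattern; it assumes a TF 1.x environment with tf.contrib available and an XLNet-style model_utils module and FLAGS object importable from the project, and the helper name build_train_spec is illustrative rather than taken from any of these repositories.

# Minimal sketch of the call pattern shared by the examples below.
# Assumptions: TF 1.x with tf.contrib, and the XLNet-style `model_utils`
# module (with its `FLAGS`) is importable from the project.
import tensorflow as tf

import model_utils  # project-local module, as used in the examples


def build_train_spec(FLAGS, total_loss, mode):
    """Illustrative helper: wrap a precomputed loss into a training spec."""
    # get_train_op builds the optimizer from FLAGS and returns the train op,
    # the learning rate tensor, and the global gradient norm.
    train_op, learning_rate, gnorm = model_utils.get_train_op(FLAGS, total_loss)
    tf.logging.info("lr = %s, gnorm = %s", learning_rate, gnorm)
    return tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode, loss=total_loss, train_op=train_op)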
Example #1
Source File: run_embed.py from xlnet_extension_tf, Apache License 2.0
def get_model_fn(self, model_config, run_config, init_checkpoint, model_type):
    """Returns `model_fn` closure for TPUEstimator."""
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]

        embeddings = self._create_model(model_config, run_config, input_ids,
            input_mask, segment_ids, model_type)

        scaffold_fn = model_utils.init_from_checkpoint(FLAGS)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            loss = tf.Variable(0.0, name="loss", dtype=tf.float32)
            train_op, _, _ = model_utils.get_train_op(FLAGS, loss)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        else:
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "embeddings": embeddings
                },
                scaffold_fn=scaffold_fn)

        return output_spec

    return model_fn
Example #2
Source File: train.py from embedding-as-service, MIT License
def get_model_fn():
    """doc."""
    def model_fn(features, labels, mode, params):
        """doc."""
        #### Training or Evaluation
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        assert is_training

        #### Retrieve `mems` from `params["cache"]`
        mems = {}
        idx = 0
        if FLAGS.mem_len > 0:
            mems["mems"] = params["cache"]

        #### Get loss from inputs
        total_loss, new_mems, monitor_dict = function_builder.get_loss(
            FLAGS, features, labels, mems, is_training)

        #### Turn `new_mems` into `new_cache`
        new_cache = []
        if FLAGS.mem_len > 0:
            new_cache += new_mems["mems"]

        #### Check model parameters
        num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
        tf.logging.info("#params: {}".format(num_params))

        #### Configuring the optimizer
        train_op, learning_rate, gnorm = model_utils.get_train_op(
            FLAGS, total_loss)
        monitor_dict["lr"] = learning_rate
        monitor_dict["gnorm"] = gnorm

        #### Customized initial checkpoint
        scaffold_fn = model_utils.init_from_checkpoint(FLAGS, global_vars=True)

        #### Creating host calls
        host_call = function_builder.construct_scalar_host_call(
            monitor_dict=monitor_dict,
            model_dir=FLAGS.model_dir,
            prefix="train/",
            reduce_fn=tf.reduce_mean)

        #### Constructing training TPUEstimatorSpec with new cache.
        train_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, loss=total_loss, train_op=train_op,
            host_call=host_call, scaffold_fn=scaffold_fn)

        train_spec.cache = new_cache

        return train_spec

    return model_fn
Example #3
Source File: train.py from xlnet, Apache License 2.0
def get_model_fn():
    """doc."""
    def model_fn(features, labels, mode, params):
        """doc."""
        #### Training or Evaluation
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        assert is_training

        #### Retrieve `mems` from `params["cache"]`
        mems = {}
        idx = 0
        if FLAGS.mem_len > 0:
            mems["mems"] = params["cache"]

        #### Get loss from inputs
        total_loss, new_mems, monitor_dict = function_builder.get_loss(
            FLAGS, features, labels, mems, is_training)

        #### Turn `new_mems` into `new_cache`
        new_cache = []
        if FLAGS.mem_len > 0:
            new_cache += new_mems["mems"]

        #### Check model parameters
        num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
        tf.logging.info("#params: {}".format(num_params))

        #### Configuring the optimizer
        train_op, learning_rate, gnorm = model_utils.get_train_op(
            FLAGS, total_loss)
        monitor_dict["lr"] = learning_rate
        monitor_dict["gnorm"] = gnorm

        #### Customized initial checkpoint
        scaffold_fn = model_utils.init_from_checkpoint(FLAGS, global_vars=True)

        #### Creating host calls
        host_call = function_builder.construct_scalar_host_call(
            monitor_dict=monitor_dict,
            model_dir=FLAGS.model_dir,
            prefix="train/",
            reduce_fn=tf.reduce_mean)

        #### Constructing training TPUEstimatorSpec with new cache.
        train_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, loss=total_loss, train_op=train_op,
            host_call=host_call, scaffold_fn=scaffold_fn)

        train_spec.cache = new_cache

        return train_spec

    return model_fn
Example #4
Source File: run_squad.py from xlnet_extension_tf, Apache License 2.0
def get_model_fn(self):
    """Returns `model_fn` closure for TPUEstimator."""
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        unique_id = features["unique_id"]
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        p_mask = features["p_mask"]
        segment_ids = features["segment_ids"]
        cls_index = features["cls_index"]

        if is_training:
            start_position = features["start_position"]
            end_position = features["end_position"]
            is_impossible = features["is_impossible"]
        else:
            start_position = None
            end_position = None
            is_impossible = None

        loss, predicts = self._create_model(is_training, input_ids, input_mask,
            p_mask, segment_ids, cls_index, start_position, end_position, is_impossible)

        scaffold_fn = model_utils.init_from_checkpoint(FLAGS)

        output_spec = None
        if is_training:
            train_op, _, _ = model_utils.get_train_op(FLAGS, loss)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        else:
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "unique_id": unique_id,
                    "answer_prob": predicts["answer_prob"],
                    "start_prob": predicts["start_prob"],
                    "start_index": predicts["start_index"],
                    "end_prob": predicts["end_prob"],
                    "end_index": predicts["end_index"]
                },
                scaffold_fn=scaffold_fn)

        return output_spec

    return model_fn
Example #5
Source File: run_ner.py from xlnet_extension_tf, Apache License 2.0
def get_model_fn(self, label_list):
    """Returns `model_fn` closure for TPUEstimator."""
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        def metric_fn(label_ids, predict_ids):
            precision = tf.metrics.precision(labels=label_ids, predictions=predict_ids)
            recall = tf.metrics.recall(labels=label_ids, predictions=predict_ids)

            metric = {
                "precision": precision,
                "recall": recall,
            }

            return metric

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

        input_ids = features["input_ids"]
        input_masks = features["input_masks"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"] if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL] else None

        loss, predict_ids = self._create_model(input_ids, input_masks, segment_ids,
            label_ids, label_list, mode)

        scaffold_fn = model_utils.init_from_checkpoint(FLAGS)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op, _, _ = model_utils.get_train_op(FLAGS, loss)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            masked_label_ids = self._get_masked_data(label_ids, label_list)
            masked_predict_ids = self._get_masked_data(predict_ids, label_list)
            eval_metrics = (metric_fn, [masked_label_ids, masked_predict_ids])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "predict": predict_ids
                },
                scaffold_fn=scaffold_fn)

        return output_spec

    return model_fn
Example #6
Source File: run_classifier.py from xlnet_extension_tf, Apache License 2.0
def get_model_fn(self, sent_label_list):
    """Returns `model_fn` closure for TPUEstimator."""
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        def metric_fn(sent_label_ids, sent_predict_ids):
            sent_accuracy = tf.metrics.accuracy(labels=sent_label_ids, predictions=sent_predict_ids)

            metric = {
                "sent_accuracy": sent_accuracy,
            }

            return metric

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

        input_ids = features["input_ids"]
        input_masks = features["input_masks"]
        segment_ids = features["segment_ids"]
        sent_label_ids = features["sent_label_ids"] if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL] else None

        loss, sent_predict_ids, sent_predict_scores, sent_predict_probs = self._create_model(
            input_ids, input_masks, segment_ids, sent_label_ids, sent_label_list, mode)

        scaffold_fn = model_utils.init_from_checkpoint(FLAGS)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op, _, _ = model_utils.get_train_op(FLAGS, loss)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            eval_metrics = (metric_fn, [sent_label_ids, sent_predict_ids])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "sent_predict_id": sent_predict_ids,
                    "sent_predict_score": sent_predict_scores,
                    "sent_predict_prob": sent_predict_probs
                },
                scaffold_fn=scaffold_fn)

        return output_spec

    return model_fn