Python mlflow.log_metrics() Examples
The following are 12 code examples of mlflow.log_metrics(), drawn from open source projects. The source file and project each example comes from are noted above it.
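Before the project examples, here is a minimal sketch of the basic call the examples build on. The run name, metric names, and values below are illustrative placeholders, not taken from any of the projects.

import mlflow

# Minimal sketch: log a batch of metrics to an active MLflow run.
# Metric names and values here are placeholders.
with mlflow.start_run(run_name="example-run"):
    for epoch in range(3):
        mlflow.log_metrics(
            {"train_loss": 1.0 / (epoch + 1), "val_loss": 1.2 / (epoch + 1)},
            step=epoch,  # optional integer step; defaults to 0 when omitted
        )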
Example #1
Source File: train.py From mlflow with Apache License 2.0

def on_epoch_end(self, epoch, logs=None):
    """
    Log Keras metrics with MLflow. If model improved on the validation data,
    evaluate it on a test set and store it as the best model.
    """
    if not logs:
        return
    self._next_step = epoch + 1
    train_loss = logs["loss"]
    val_loss = logs["val_loss"]
    mlflow.log_metrics({
        self.train_loss: train_loss,
        self.val_loss: val_loss
    }, step=epoch)

    if val_loss < self._best_val_loss:
        # The result improved in the validation set.
        # Log the model with mlflow and also evaluate and log on test set.
        self._best_train_loss = train_loss
        self._best_val_loss = val_loss
        self._best_model = keras.models.clone_model(self.model)
        self._best_model.set_weights([x.copy() for x in self.model.get_weights()])
        preds = self._best_model.predict(self._test_x)
        eval_and_log_metrics("test", self._test_y, preds, epoch)
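The on_epoch_end hook above belongs to a custom Keras callback. As a rough sketch of how such a callback is wired into training (the class name MLflowLoggingCallback and its constructor arguments are hypothetical placeholders, not names from the source file):

import mlflow

# Hypothetical wiring for a callback that implements the on_epoch_end hook above.
with mlflow.start_run():
    callback = MLflowLoggingCallback(test_x, test_y)  # hypothetical constructor
    model.fit(
        train_x, train_y,
        validation_data=(val_x, val_y),  # needed so logs contain "val_loss"
        epochs=10,
        callbacks=[callback],  # Keras invokes on_epoch_end after every epoch
    )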
Example #2
Source File: mlflow.py From tf-yarn with Apache License 2.0

def log_metrics(metrics: Dict[str, Any], step: int = None):
    mlflow.log_metrics(metrics, step)
Example #3
Source File: utils.py From FARM with Apache License 2.0

def log_metrics(cls, metrics, step):
    raise NotImplementedError()
Example #4
Source File: utils.py From FARM with Apache License 2.0

def log_metrics(cls, metrics, step):
    logger.info(f"Logged metrics at step {step}: \n {metrics}")
Example #5
Source File: utils.py From FARM with Apache License 2.0

def log_metrics(cls, metrics, step):
    try:
        mlflow.log_metrics(metrics, step=step)
    except ConnectionError:
        logger.warning(f"ConnectionError in logging metrics to MLFlow.")
    except Exception as e:
        logger.warning(f"Failed to log metrics: {e}")
Example #6
Source File: utils.py From FARM with Apache License 2.0

def log_metrics(cls, metrics, step):
    for key, value in metrics.items():
        TensorBoardLogger.summary_writer.add_scalar(
            tag=key, scalar_value=value, global_step=step
        )
Example #7
Source File: meter.py From theconf with MIT License

def log(self, prefix, step=None, tensorboard=True, mlflow=False):
    step = step if step is not None else self.step
    if self.tensorboard_path and tensorboard:
        for key, value in self.get().items():
            self.writers[prefix].add_scalar('metrics/%s' % key, value, global_step=step)
    if mlflow:
        module_mlflow.log_metrics(self.get(prefix=prefix), step=step)
Example #8
Source File: tensorflow.py From mlflow with Apache License 2.0

def on_epoch_end(self, epoch, logs=None):
    if (epoch - 1) % _LOG_EVERY_N_STEPS == 0:
        try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
Example #9
Source File: train.py From mlflow with Apache License 2.0

def main():
    # parse command-line arguments
    args = parse_args()

    # prepare train and test data
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    train_set = lgb.Dataset(X_train, label=y_train)

    # enable auto logging
    mlflow.lightgbm.autolog()

    with mlflow.start_run():

        # train model
        params = {
            'objective': 'multiclass',
            'num_class': 3,
            'learning_rate': args.learning_rate,
            'metric': 'multi_logloss',
            'colsample_bytree': args.colsample_bytree,
            'subsample': args.subsample,
            'seed': 42,
        }
        model = lgb.train(params, train_set, num_boost_round=10,
                          valid_sets=[train_set], valid_names=['train'])

        # evaluate model
        y_proba = model.predict(X_test)
        y_pred = y_proba.argmax(axis=1)
        loss = log_loss(y_test, y_proba)
        acc = accuracy_score(y_test, y_pred)

        # log metrics
        mlflow.log_metrics({'log_loss': loss, 'accuracy': acc})
Example #10
Source File: train.py From mlflow with Apache License 2.0

def main():
    # parse command-line arguments
    args = parse_args()

    # prepare train and test data
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    dtrain = xgb.DMatrix(X_train, label=y_train)
    dtest = xgb.DMatrix(X_test, label=y_test)

    # enable auto logging
    mlflow.xgboost.autolog()

    with mlflow.start_run():

        # train model
        params = {
            'objective': 'multi:softprob',
            'num_class': 3,
            'learning_rate': args.learning_rate,
            'eval_metric': 'mlogloss',
            'colsample_bytree': args.colsample_bytree,
            'subsample': args.subsample,
            'seed': 42,
        }
        model = xgb.train(params, dtrain, evals=[(dtrain, 'train')])

        # evaluate model
        y_proba = model.predict(dtest)
        y_pred = y_proba.argmax(axis=1)
        loss = log_loss(y_test, y_proba)
        acc = accuracy_score(y_test, y_pred)

        # log metrics
        mlflow.log_metrics({'log_loss': loss, 'accuracy': acc})
Example #11
Source File: test_tracking.py From mlflow with Apache License 2.0

def test_log_metrics_uses_millisecond_timestamp_resolution_fluent():
    with start_run() as active_run, mock.patch("time.time") as time_mock:
        time_mock.side_effect = lambda: 123
        mlflow.log_metrics({
            "name_1": 25,
            "name_2": -3,
        })
        mlflow.log_metrics({
            "name_1": 30,
        })
        mlflow.log_metrics({
            "name_1": 40,
        })
        run_id = active_run.info.run_id

    client = tracking.MlflowClient()
    metric_history_name1 = client.get_metric_history(run_id, "name_1")
    assert set([(m.value, m.timestamp) for m in metric_history_name1]) == set([
        (25, 123 * 1000),
        (30, 123 * 1000),
        (40, 123 * 1000),
    ])
    metric_history_name2 = client.get_metric_history(run_id, "name_2")
    assert set([(m.value, m.timestamp) for m in metric_history_name2]) == set([
        (-3, 123 * 1000),
    ])
Example #12
Source File: keras_mlflow.py From optuna with MIT License

def mlflow_callback(study, trial):
    trial_value = trial.value if trial.value is not None else float("nan")
    with mlflow.start_run(run_name=study.study_name):
        mlflow.log_params(trial.params)
        mlflow.log_metrics({"mean_squared_error": trial_value})