Python mlflow.end_run() Examples

The following are 24 code examples of mlflow.end_run(), collected from open-source projects. The originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the mlflow module.
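Before going through the examples, here is a minimal sketch of how mlflow.end_run() is typically used (the parameter and metric names below are purely illustrative): a run opened with mlflow.start_run() stays active until it is ended explicitly, and calling mlflow.end_run() when no run is active is a harmless no-op, which is why many of the tests below call it in a finally block.

import mlflow

# Open a run explicitly; it remains the active run until it is ended.
mlflow.start_run()
mlflow.log_param("learning_rate", 0.01)  # illustrative parameter
mlflow.log_metric("accuracy", 0.95)      # illustrative metric
mlflow.end_run()

# After end_run() there is no active run.
assert mlflow.active_run() is None

# Ending a run when none is active does nothing, so end_run() is safe
# to call unconditionally in cleanup code such as finally blocks.
mlflow.end_run()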
Example #1
Source File: test_model_export_with_class_and_artifacts.py    From mlflow with Apache License 2.0
def test_log_model_no_registered_model_name(sklearn_knn_model, main_scoped_model_class):
    register_model_patch = mock.patch("mlflow.register_model")
    with register_model_patch:
        sklearn_artifact_path = "sk_model_no_run"
        with mlflow.start_run():
            mlflow.sklearn.log_model(sk_model=sklearn_knn_model,
                                     artifact_path=sklearn_artifact_path)
            sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=sklearn_artifact_path)

        def test_predict(sk_model, model_input):
            return sk_model.predict(model_input) * 2

        pyfunc_artifact_path = "pyfunc_model"
        assert mlflow.active_run() is None
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                artifacts={"sk_model": sklearn_model_uri},
                                python_model=main_scoped_model_class(test_predict))
        mlflow.register_model.assert_not_called()
        mlflow.end_run() 
Example #2
Source File: test_model_export_with_class_and_artifacts.py    From mlflow with Apache License 2.0
def test_log_model_calls_register_model(sklearn_knn_model, main_scoped_model_class):
    register_model_patch = mock.patch("mlflow.register_model")
    with register_model_patch:
        sklearn_artifact_path = "sk_model_no_run"
        with mlflow.start_run():
            mlflow.sklearn.log_model(sk_model=sklearn_knn_model,
                                     artifact_path=sklearn_artifact_path)
            sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=sklearn_artifact_path)

        def test_predict(sk_model, model_input):
            return sk_model.predict(model_input) * 2

        pyfunc_artifact_path = "pyfunc_model"
        assert mlflow.active_run() is None
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                artifacts={"sk_model": sklearn_model_uri},
                                python_model=main_scoped_model_class(test_predict),
                                registered_model_name="AdsModel1")
        model_uri = "runs:/{run_id}/{artifact_path}".format(run_id=mlflow.active_run().info.run_id,
                                                            artifact_path=pyfunc_artifact_path)
        mlflow.register_model.assert_called_once_with(model_uri, "AdsModel1")
        mlflow.end_run() 
Example #3
Source File: test_model_export_with_class_and_artifacts.py    From mlflow with Apache License 2.0
def test_pyfunc_model_log_load_no_active_run(sklearn_knn_model, main_scoped_model_class, iris_data):
    sklearn_artifact_path = "sk_model_no_run"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path)
        sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=sklearn_artifact_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2

    pyfunc_artifact_path = "pyfunc_model"
    assert mlflow.active_run() is None
    mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                            artifacts={"sk_model": sklearn_model_uri},
                            python_model=main_scoped_model_class(test_predict))
    pyfunc_model_uri = "runs:/{run_id}/{artifact_path}".format(
        run_id=mlflow.active_run().info.run_id,
        artifact_path=pyfunc_artifact_path)
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_uri)
    np.testing.assert_array_equal(
        loaded_pyfunc_model.predict(iris_data[0]),
        test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]))
    mlflow.end_run() 
Example #4
Source File: experiment.py    From nyaggle with MIT License
def stop(self):
        """
        Stop current experiment.
        """
        self._save_dict(self.metrics, 'metrics.json')
        self._save_dict(self.params, 'params.json')

        if not self.is_custom:
            for h in self.logger.handlers:
                h.close()

        if self.with_mlflow:
            import mlflow
            from mlflow.exceptions import MlflowException

            try:
                mlflow.log_artifact(self.log_path)
                mlflow.log_artifact(os.path.join(self.logging_directory, 'metrics.json'))
                mlflow.log_artifact(os.path.join(self.logging_directory, 'params.json'))
            except MlflowException as e:
                warnings.warn('Error in saving artifacts to mlflow. The result may not be saved.: {}'.format(e))
            if not self.inherit_existing_run:
                mlflow.end_run() 
Example #5
Source File: test_run.py    From nyaggle with MIT License
def test_inherit_outer_scope_run(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('foo', 1)

    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()

    run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name)

    assert mlflow.active_run() is not None  # still valid

    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data

    assert data.metrics['Overall'] > 0  # recorded

    mlflow.end_run() 
Example #6
Source File: test_run.py    From nyaggle with MIT License
def test_ignore_errors_in_mlflow_params(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('features', 'ABC')
    mlflow.log_metric('Overall', -99)

    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()

    result = run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name, feature_list=[])

    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data

    assert data.metrics['Overall'] == result.metrics[-1]
    assert data.params['features'] == 'ABC'  # params cannot be overwritten

    mlflow.end_run() 
Example #7
Source File: test_model_export_with_loader_module_and_data_path.py    From mlflow with Apache License 2.0
def test_model_log_load_no_active_run(sklearn_knn_model, iris_data, tmpdir):
    sk_model_path = os.path.join(str(tmpdir), "knn.pkl")
    with open(sk_model_path, "wb") as f:
        pickle.dump(sklearn_knn_model, f)

    pyfunc_artifact_path = "pyfunc_model"
    assert mlflow.active_run() is None
    mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                            data_path=sk_model_path,
                            loader_module=os.path.basename(__file__)[:-3],
                            code_path=[__file__])
    pyfunc_model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format(
        run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path))

    model_config = Model.load(os.path.join(pyfunc_model_path, "MLmodel"))
    assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors
    assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
    reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path)
    np.testing.assert_array_equal(
        sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0]))
    mlflow.end_run() 
Example #8
Source File: test_keras_model_export.py    From mlflow with Apache License 2.0
def test_model_log(model, data, predicted):
    x, _ = data
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        try:
            if should_start_run:
                mlflow.start_run()
            artifact_path = "keras_model"
            mlflow.keras.log_model(model, artifact_path=artifact_path)
            model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=artifact_path)

            # Load model
            model_loaded = mlflow.keras.load_model(model_uri=model_uri)
            assert all(model_loaded.predict(x) == predicted)

            # Loading pyfunc model
            pyfunc_loaded = mlflow.pyfunc.load_model(model_uri=model_uri)
            assert all(pyfunc_loaded.predict(x).values == predicted)
        finally:
            mlflow.end_run() 
Example #9
Source File: test_onnx_model_export.py    From mlflow with Apache License 2.0
def test_model_log(onnx_model, onnx_custom_env):
    # pylint: disable=unused-argument

    import onnx
    import mlflow.onnx
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        try:
            if should_start_run:
                mlflow.start_run()
            artifact_path = "onnx_model"
            mlflow.onnx.log_model(onnx_model=onnx_model,
                                  artifact_path=artifact_path,
                                  conda_env=onnx_custom_env)
            model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=artifact_path)

            # Load model
            onnx.checker.check_model = mock.Mock()
            mlflow.onnx.load_model(model_uri)
            assert onnx.checker.check_model.called
        finally:
            mlflow.end_run() 
Example #10
Source File: test_fluent.py    From mlflow with Apache License 2.0
def test_delete_tag():
    """
    Confirm that fluent API delete tags actually works
    :return:
    """
    mlflow.set_tag('a', 'b')
    run = MlflowClient().get_run(mlflow.active_run().info.run_id)
    print(run.info.run_id)
    assert 'a' in run.data.tags
    mlflow.delete_tag('a')
    run = MlflowClient().get_run(mlflow.active_run().info.run_id)
    assert 'a' not in run.data.tags
    with pytest.raises(MlflowException):
        mlflow.delete_tag('a')
    with pytest.raises(MlflowException):
        mlflow.delete_tag('b')
    mlflow.end_run() 
Example #11
Source File: test_h2o_model_export.py    From mlflow with Apache License 2.0
def test_model_log(h2o_iris_model):
    h2o_model = h2o_iris_model.model
    old_uri = mlflow.get_tracking_uri()
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        with TempDir(chdr=True, remove_on_exit=True):
            try:
                artifact_path = "gbm_model"
                mlflow.set_tracking_uri("test")
                if should_start_run:
                    mlflow.start_run()
                mlflow.h2o.log_model(h2o_model=h2o_model, artifact_path=artifact_path)
                model_uri = "runs:/{run_id}/{artifact_path}".format(
                    run_id=mlflow.active_run().info.run_id,
                    artifact_path=artifact_path)

                # Load model
                h2o_model_loaded = mlflow.h2o.load_model(model_uri=model_uri)
                assert all(
                    h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() ==
                    h2o_model.predict(h2o_iris_model.inference_data).as_data_frame())
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_uri) 
Example #12
Source File: test_spark_model_export.py    From mlflow with Apache License 2.0
def test_sparkml_estimator_model_log(tmpdir, spark_model_estimator):
    # Print the coefficients and intercept for multinomial logistic regression
    old_tracking_uri = mlflow.get_tracking_uri()
    cnt = 0
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        for dfs_tmp_dir in [None, os.path.join(str(tmpdir), "test")]:
            print("should_start_run =", should_start_run, "dfs_tmp_dir =", dfs_tmp_dir)
            try:
                tracking_dir = os.path.abspath(str(tmpdir.join("mlruns")))
                mlflow.set_tracking_uri("file://%s" % tracking_dir)
                if should_start_run:
                    mlflow.start_run()
                artifact_path = "model%d" % cnt
                cnt += 1
                sparkm.log_model(
                    artifact_path=artifact_path,
                    spark_model=spark_model_estimator.model,
                    dfs_tmpdir=dfs_tmp_dir)
                model_uri = "runs:/{run_id}/{artifact_path}".format(
                    run_id=mlflow.active_run().info.run_id,
                    artifact_path=artifact_path)

                # test reloaded model
                reloaded_model = sparkm.load_model(model_uri=model_uri, dfs_tmpdir=dfs_tmp_dir)
                preds_df = reloaded_model.transform(spark_model_estimator.spark_df)
                preds = [x.prediction for x in preds_df.select("prediction").collect()]
                assert spark_model_estimator.predictions == preds
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_tracking_uri)
                x = dfs_tmp_dir or sparkm.DFS_TMP
                shutil.rmtree(x)
                shutil.rmtree(tracking_dir) 
Example #13
Source File: mlflow_logger.py    From ignite with BSD 3-Clause "New" or "Revised" License
def close(self):
        import mlflow

        mlflow.end_run() 
Example #14
Source File: test_fastai_autolog.py    From mlflow with Apache License 2.0
def manual_run(request, tracking_uri_mock):
    if request.param:
        mlflow.start_run()
    yield
    mlflow.end_run() 
Example #15
Source File: test_spark_datasource_autologging.py    From mlflow with Apache License 2.0
def test_autologging_does_not_start_run(spark_session, format_to_file_path):
    try:
        mlflow.spark.autolog()
        data_format = list(format_to_file_path.keys())[0]
        file_path = format_to_file_path[data_format]
        df = spark_session.read.format(data_format).option("header", "true"). \
            option("inferSchema", "true").load(file_path)
        df.collect()
        time.sleep(1)
        active_run = mlflow.active_run()
        assert active_run is None
        assert len(mlflow.search_runs()) == 0
    finally:
        mlflow.end_run() 
Example #16
Source File: test_tensorflow2_autolog.py    From mlflow with Apache License 2.0
def manual_run(request):
    if request.param:
        mlflow.start_run()
    yield
    mlflow.end_run() 
Example #17
Source File: utils.py    From FARM with Apache License 2.0
def end_run(cls):
        logger.info(f"**** End of Experiment **** ") 
Example #18
Source File: test_spark_model_export.py    From mlflow with Apache License 2.0
def test_sparkml_model_log(tmpdir, spark_model_iris):
    # Print the coefficients and intercept for multinomial logistic regression
    old_tracking_uri = mlflow.get_tracking_uri()
    cnt = 0
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        for dfs_tmp_dir in [None, os.path.join(str(tmpdir), "test")]:
            print("should_start_run =", should_start_run, "dfs_tmp_dir =", dfs_tmp_dir)
            try:
                tracking_dir = os.path.abspath(str(tmpdir.join("mlruns")))
                mlflow.set_tracking_uri("file://%s" % tracking_dir)
                if should_start_run:
                    mlflow.start_run()
                artifact_path = "model%d" % cnt
                cnt += 1
                sparkm.log_model(artifact_path=artifact_path, spark_model=spark_model_iris.model,
                                 dfs_tmpdir=dfs_tmp_dir)
                model_uri = "runs:/{run_id}/{artifact_path}".format(
                    run_id=mlflow.active_run().info.run_id,
                    artifact_path=artifact_path)

                # test reloaded model
                reloaded_model = sparkm.load_model(model_uri=model_uri, dfs_tmpdir=dfs_tmp_dir)
                preds_df = reloaded_model.transform(spark_model_iris.spark_df)
                preds = [x.prediction for x in preds_df.select("prediction").collect()]
                assert spark_model_iris.predictions == preds
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_tracking_uri)
                x = dfs_tmp_dir or sparkm.DFS_TMP
                shutil.rmtree(x)
                shutil.rmtree(tracking_dir) 
Example #19
Source File: test_autologging_utils.py    From mlflow with Apache License 2.0
def start_run():
    mlflow.start_run()
    yield
    mlflow.end_run() 
Example #20
Source File: meter.py    From theconf with MIT License
def close(self, mlflow=False):
        for prefix, writer in self.writers.items():
            writer.close()
        if mlflow:
            module_mlflow.end_run() 
Example #21
Source File: loggers.py    From OpenKiwi with GNU Affero General Public License v3.0
def end_run():
        mlflow.end_run() 
Example #22
Source File: loggers.py    From OpenKiwi with GNU Affero General Public License v3.0
def configure(
        self,
        run_uuid,
        experiment_name,
        tracking_uri,
        run_name=None,
        always_log_artifacts=False,
        create_run=True,
        create_experiment=True,
        nest_run=True,
    ):
        if mlflow.active_run() and not nest_run:
            logger.info('Ending previous MLFlow run: {}.'.format(self.run_uuid))
            mlflow.end_run()

        self.always_log_artifacts = always_log_artifacts
        self._experiment_name = experiment_name
        self._run_name = run_name

        # MLflow specific
        if tracking_uri:
            mlflow.set_tracking_uri(tracking_uri)

        if run_uuid:
            existing_run = MlflowClient().get_run(run_uuid)
            if not existing_run and not create_run:
                raise FileNotFoundError(
                    'Run ID {} not found under {}'.format(
                        run_uuid, mlflow.get_tracking_uri()
                    )
                )

        experiment_id = self._retrieve_mlflow_experiment_id(
            experiment_name, create=create_experiment
        )
        return mlflow.start_run(
            run_uuid,
            experiment_id=experiment_id,
            run_name=run_name,
            nested=nest_run,
        ) 
Example #23
Source File: loggers.py    From OpenKiwi with GNU Affero General Public License v3.0
def end_run():
        pass 
Example #24
Source File: utils.py    From FARM with Apache License 2.0
def end_run(cls):
        mlflow.end_run()