A Complete End-to-End Coding Guide to MLflow Experiment Tracking, Hyperparameter Optimization, Model Evaluation, and Live Model Deployment


# Pull the tuned hyperparameters out of the winning trial's params dict.
best_params = best["params"]
best_C = best_params["C"]
best_solver = best_params["solver"]


# Rebuild the winning model as a single pipeline: standardize the features,
# then fit an L2-penalized logistic regression with the tuned C and solver.
# Step names "scaler"/"clf" are kept so downstream lookups keep working.
final_pipe = Pipeline(
    steps=[
        ("scaler", StandardScaler()),
        (
            "clf",
            LogisticRegression(
                penalty="l2",
                C=best_C,
                solver=best_solver,
                max_iter=2000,
                random_state=42,
            ),
        ),
    ]
)


# Train the tuned pipeline, log hold-out metrics and the fitted model to MLflow,
# then run MLflow's built-in classifier evaluation and store a JSON summary.
with mlflow.start_run(run_name="final_model_run") as run:
    final_pipe.fit(X_train, y_train)

    # Hold-out predictions: positive-class probabilities, thresholded at 0.5.
    y_proba = final_pipe.predict_proba(X_test)[:, 1]
    y_pred = (y_proba >= 0.5).astype(int)

    # Standard binary-classification metrics on the test split; zero_division=0
    # keeps precision/recall/F1 well-defined if a class is never predicted.
    test_scores = {
        "test_auc": roc_auc_score(y_test, y_proba),
        "test_accuracy": accuracy_score(y_test, y_pred),
        "test_precision": precision_score(y_test, y_pred, zero_division=0),
        "test_recall": recall_score(y_test, y_pred, zero_division=0),
        "test_f1": f1_score(y_test, y_pred, zero_division=0),
    }
    mlflow.log_metrics({name: float(score) for name, score in test_scores.items()})
    mlflow.log_params({"C": best_C, "solver": best_solver, "model": "LogisticRegression+StandardScaler"})

    # Infer an input/output signature from a small sample so the logged model
    # carries its expected schema. NOTE(review): the signature output is inferred
    # from predict_proba, while the pyfunc flavor's predict() returns class
    # labels — confirm this mismatch is intentional.
    sample = X_test.iloc[:5].copy()
    sig = infer_signature(sample, final_pipe.predict_proba(sample)[:, 1])

    # Log the fitted pipeline under the run's "model" artifact path
    # (no registry entry: registered_model_name stays None).
    logged = mlflow.sklearn.log_model(
        sk_model=final_pipe,
        artifact_path="model",
        signature=sig,
        input_example=sample,
        registered_model_name=None,
    )

    print("Final run_id:", run.info.run_id)
    print("Logged model URI:", logged.model_uri)

    # Evaluation frame: the test features plus a "label" column of ground truth.
    holdout = X_test.copy()
    holdout["label"] = y_test.values

    # MLflow's default classifier evaluator: computes its own metric suite and
    # artifacts (plots, tables) against the logged model URI.
    report = mlflow.models.evaluate(
        model=logged.model_uri,
        data=holdout,
        targets="label",
        model_type="classifier",
        evaluators="default",
    )

    # Persist a JSON-serializable summary of the evaluation in the run's
    # artifacts; non-numeric metric values are stringified defensively.
    summary = {
        "metrics": {
            key: float(val) if isinstance(val, (int, float, np.floating)) else str(val)
            for key, val in report.metrics.items()
        },
        "artifacts": {key: str(val) for key, val in report.artifacts.items()},
    }
    mlflow.log_dict(summary, "evaluation/eval_summary.json")



Source link

Leave a Reply

Your email address will not be published. Required fields are marked *