diff --git a/model_selection/cv_metric_gen.py b/model_selection/cv_metric_gen.py
index 2042928a9e286f77500e901a1175c1209796fe29..b5f4a08ce77b52554ca4739657d7fb95dfd4ff69 100644
--- a/model_selection/cv_metric_gen.py
+++ b/model_selection/cv_metric_gen.py
@@ -175,8 +175,8 @@ if __name__ == "__main__":
     # Metric generation through cv for tuned models3
     # --------------------------------------------------------------------------------------------------------
     scores_sheets = {} # To store score dfs as sheets in the same excel file
-    for i, group in enumerate(['pre', 'post']): # 'post'
-        for j, method in enumerate(['', 'over_', 'under_']):
+    for i, group in enumerate(['pre']):
+        for j, method in enumerate(['']):
             # Get train dataset based on group and method
             X_train = data_dic['X_train_' + method + group]
             y_train = data_dic['y_train_' + method + group]
@@ -184,63 +184,65 @@ if __name__ == "__main__":
             models = get_tuned_models(group, method_names[j])
             # Scores df -> one column per cv split, one row for each model-metric
             scores_df = pd.DataFrame(columns=range(1,11), index=[f"{model_name}_{metric_name}" for model_name in models.keys() for metric_name in scorings.keys()])
-            # Create a figure for all models in this group-method
+            # Create a figure with 2 subplots (roc and pr curves) for each model in this group-method
             fig, axes = plt.subplots(len(models), 2, figsize=(10, 8 * len(models)))
             if len(models) == 1: # Adjustment if there's only one model (axes indexing issue)
                 axes = [axes]
             # Metric generation for each model
             for model_idx, (model_name, model) in enumerate(models.items()):
                 print(f"{group}-{method_names[j]}-{model_name}")
-                # # Retrieve cv scores for our metrics of interest
-                # scores = cross_validate(model, X_train, y_train, scoring=scorings, cv=cv, return_train_score=True, n_jobs=10)
-                # # Save results of each fold
-                # for metric_name in scorings.keys():
-                #     scores_df.loc[model_name + f'_{metric_name}']=list(np.around(np.array(scores[f"test_{metric_name}"]),4))
-                mean_fpr = np.linspace(0, 1, 100)
-                tprs, aucs = [], []
-                mean_recall = np.linspace(0, 1, 100)
-                precisions, pr_aucs = [], []
-                cmap = plt.get_cmap('tab10') # Colormap
-                # Initialize storage for scores for each fold
-                fold_scores = {metric_name: [] for metric_name in scorings.keys()}
-                # Loop through each fold in the cross-validation
-                for fold_idx, (train_idx, test_idx) in enumerate(cv.split(X_train, y_train)):
-                    X_train_fold, X_test_fold = X_train[train_idx], X_train[test_idx]
-                    y_train_fold, y_test_fold = y_train[train_idx], y_train[test_idx]
-                    # Fit the model on the training data
-                    model.fit(X_train_fold, y_train_fold)
-                    # Predict on the test data
-                    if hasattr(model, "decision_function"):
-                        y_score = model.decision_function(X_test_fold)
-                    else:
-                        y_score = model.predict_proba(X_test_fold)[:, 1] # Use probability of positive class
-                    y_pred = model.predict(X_test_fold)
-                    # Calculate and store the scores for each metric
-                    for metric_name, scorer in scorings.items():
-                        if metric_name in ['AUROC', 'AUPRC']:
-                            score = scorer._score_func(y_test_fold, y_score)
+                if model_name == 'DT':
+                    # Curve generation setup
+                    mean_fpr = np.linspace(0, 1, 100)
+                    tprs, aucs = [], []
+                    mean_recall = np.linspace(0, 1, 100)
+                    precisions, pr_aucs = [], []
+                    cmap = plt.get_cmap('tab10') # Colormap
+                    # Initialize storage for scores for each fold
+                    fold_scores = {metric_name: [] for metric_name in scorings.keys()}
+                    # Manually loop through each fold in the cross-validation
+                    for fold_idx, (train_idx, test_idx) in enumerate(cv.split(X_train, y_train)):
+                        X_train_fold, X_test_fold = X_train[train_idx], X_train[test_idx]
+                        y_train_fold, y_test_fold = y_train[train_idx], y_train[test_idx]
+                        # Fit the model on the training data
+                        model.fit(X_train_fold, y_train_fold)
+                        # --------------------- SCORINGS ---------------------------
+                        # Predict on the test data
+                        # Check if the model has a decision_function method
+                        if hasattr(model, "decision_function"):
+                            # Use decision_function to get the continuous scores for each test sample
+                            y_score = model.decision_function(X_test_fold)
                         else:
-                            score = scorer._score_func(y_test_fold, y_pred)
-                        fold_scores[metric_name].append(score)
+                            # If decision_function is not available, use predict_proba to get probabilities
+                            # predict_proba returns an array with probabilities for all classes
+                            # [:, 1] extracts the probability for the positive class (class 1)
+                            y_score = model.predict_proba(X_test_fold)[:, 1]
+                        # Get the predicted class labels for the test data
+                        y_pred = model.predict(X_test_fold)
+                        # Calculate and store the scores for each metric
+                        for metric_name, scorer in scorings.items():
+                            if metric_name in ['AUROC', 'AUPRC']:
+                                score = scorer._score_func(y_test_fold, y_score)
+                            else:
+                                score = scorer._score_func(y_test_fold, y_pred)
+                            fold_scores[metric_name].append(score)
+                        # --------------------- END SCORINGS ---------------------------
                     # --------------------- CURVES ---------------------------
-                    # Generate ROC curve for the fold
-                    roc_display = RocCurveDisplay.from_estimator(model, X_test_fold, y_test_fold,
-                                                                 name=f"ROC fold {fold_idx}", alpha=0.6, lw=2,
-                                                                 ax=axes[model_idx][0], color=cmap(fold_idx % 10))
-                    interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
-                    interp_tpr[0] = 0.0
-                    tprs.append(interp_tpr)
-                    aucs.append(roc_display.roc_auc)
-                    # Generate Precision-Recall curve for the fold
-                    pr_display = PrecisionRecallDisplay.from_estimator(model, X_test_fold, y_test_fold,
-                                                                       name=f"PR fold {fold_idx}", alpha=0.6, lw=2,
-                                                                       ax=axes[model_idx][1], color=cmap(fold_idx % 10))
-                    interp_precision = np.interp(mean_recall, pr_display.recall[::-1], pr_display.precision[::-1])
-                    precisions.append(interp_precision)
-                    pr_aucs.append(pr_display.average_precision)
-                # Store the fold scores in the dataframe
-                for metric_name, scores in fold_scores.items():
-                    scores_df.loc[f"{model_name}_{metric_name}"] = np.around(scores, 4)
+                        # Generate ROC curve for the fold
+                        roc_display = RocCurveDisplay.from_estimator(model, X_test_fold, y_test_fold,
+                                                                     name=f"ROC fold {fold_idx}", alpha=0.6, lw=2,
+                                                                     ax=axes[model_idx][0], color=cmap(fold_idx % 10))
+                        interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
+                        interp_tpr[0] = 0.0
+                        tprs.append(interp_tpr)
+                        aucs.append(roc_display.roc_auc)
+                        # Generate Precision-Recall curve for the fold
+                        pr_display = PrecisionRecallDisplay.from_estimator(model, X_test_fold, y_test_fold,
+                                                                           name=f"PR fold {fold_idx}", alpha=0.6, lw=2,
+                                                                           ax=axes[model_idx][1], color=cmap(fold_idx % 10))
+                        interp_precision = np.interp(mean_recall, pr_display.recall[::-1], pr_display.precision[::-1])
+                        precisions.append(interp_precision)
+                        pr_aucs.append(pr_display.average_precision)
                 # Plot diagonal line for random guessing in ROC curve
                 axes[model_idx][0].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8, label='Random guessing')
                 # Compute mean ROC curve
@@ -261,6 +263,10 @@ if __name__ == "__main__":
                 # Set Precision-Recall plot limits and title
                 axes[model_idx][1].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title=f"Precision-Recall Curve - {model_name} ({group}-{method_names[j]})")
                 axes[model_idx][1].legend(loc="lower right")
+                # --------------------- END CURVES ---------------------------
+                # Store the fold scores in the dataframe
+                for metric_name, scores in fold_scores.items():
+                    scores_df.loc[f"{model_name}_{metric_name}"] = np.around(scores, 4)
             # Store the DataFrame in the dictionary with a unique key for each sheet
             sheet_name = f"{group}_{method_names[j]}"
             scores_sheets[sheet_name] = scores_df
@@ -272,4 +278,5 @@ if __name__ == "__main__":
     with pd.ExcelWriter('./output_cv_metrics/metrics.xlsx') as writer:
         for sheet_name, data in scores_sheets.items():
             data.to_excel(writer, sheet_name=sheet_name)
-    print("Successful cv metric generation for tuned models")
\ No newline at end of file
+    print("Successful cv metric generation for tuned models")
+    # --------------------------------------------------------------------------------------------------------
\ No newline at end of file
diff --git a/model_selection/output_cv_metrics.xlsx b/model_selection/output_cv_metrics.xlsx
deleted file mode 100644
index 72112a27b6ff36492eca8c7f8c5851ea62c74d66..0000000000000000000000000000000000000000
Binary files a/model_selection/output_cv_metrics.xlsx and /dev/null differ
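
Note (outside the patch): the diff above replaces the commented-out cross_validate call with a manual cv.split loop so that per-fold ROC and PR curves can be plotted while fold scores are collected through the private scorer._score_func attribute. Below is a minimal, self-contained Python sketch of an alternative that scores each fold by calling the scorer objects directly as scorer(estimator, X, y), assuming the entries of scorings are standard scikit-learn scorers (e.g. built with get_scorer or make_scorer) and cv is a StratifiedKFold; the dataset, classifier and metric names below are placeholders, not the project's data_dic, tuned models or exact metric set.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.metrics import get_scorer
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier

# Placeholder stand-ins for the script's data_dic entries, tuned 'DT' model,
# scorings dict and cv object (all assumptions made for this sketch).
X, y = make_classification(n_samples=300, random_state=0)
model = DecisionTreeClassifier(random_state=0)
scorings = {'AUROC': get_scorer('roc_auc'),
            'AUPRC': get_scorer('average_precision'),
            'F1': get_scorer('f1')}
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)

fold_scores = {name: [] for name in scorings}
for train_idx, test_idx in cv.split(X, y):
    model.fit(X[train_idx], y[train_idx])
    for name, scorer in scorings.items():
        # The scorer itself decides whether it needs predict, predict_proba
        # or decision_function, so no hasattr() branching is required.
        fold_scores[name].append(scorer(model, X[test_idx], y[test_idx]))

for name, scores in fold_scores.items():
    print(name, np.around(scores, 4))

Calling each scorer directly lets scikit-learn choose predict, predict_proba or decision_function per metric, which avoids both the hasattr branching and the reliance on the private _score_func attribute.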