Commit cf69c55e authored by Joaquin Torres

Generated first ROC curve to see behavior

parent 72e7890d
@@ -175,8 +175,8 @@ if __name__ == "__main__":
     # Metric generation through cv for tuned models3
     # --------------------------------------------------------------------------------------------------------
     scores_sheets = {} # To store score dfs as sheets in the same excel file
-    for i, group in enumerate(['pre', 'post']):
-        for j, method in enumerate(['', 'over_', 'under_']):
+    for i, group in enumerate(['pre']): # 'post'
+        for j, method in enumerate(['']): # '', 'over_', 'under_'
             # print(f"{group}-{method_names[j]}")
             # Get train dataset based on group and method
             X_train = data_dic['X_train_' + method + group]
@@ -191,28 +191,29 @@ if __name__ == "__main__":
                 axes = [axes]
             # Metric generation for each model
             for model_idx, (model_name, model) in enumerate(models.items()):
-                print(f"{group}-{method_names[j]}-{model_name}")
-                # Retrieve cv scores for our metrics of interest
-                scores = cross_validate(model, X_train, y_train, scoring=scorings, cv=cv, return_train_score=True, n_jobs=10)
-                # Save results of each fold
-                for metric_name in scorings.keys():
-                    scores_df.loc[model_name + f'_{metric_name}']=list(np.around(np.array(scores[f"test_{metric_name}"]),4))
-                # Generate ROC curves
-                mean_fpr = np.linspace(0, 1, 100)
-                tprs, aucs = [], []
-                # Loop through each fold in the cross-validation
-                for fold_idx, (train, test) in enumerate(cv.split(X_train, y_train)):
-                    # Fit the model on the training data
-                    model.fit(X_train[train], y_train[train])
-                    # Use RocCurveDisplay to generate the ROC curve
-                    roc_display = RocCurveDisplay.from_estimator(model, X_train[test], y_train[test],
-                                                                 name=f"ROC fold {fold_idx}", alpha=0.3, lw=1, ax=axes[model_idx])
-                    # Interpolate the true positive rates to get a smooth curve
-                    interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
-                    interp_tpr[0] = 0.0
-                    # Append the interpolated TPR and AUC for this fold
-                    tprs.append(interp_tpr)
-                    aucs.append(roc_display.roc_auc)
+                if model_name == 'DT':
+                    print(f"{group}-{method_names[j]}-{model_name}")
+                    # Retrieve cv scores for our metrics of interest
+                    scores = cross_validate(model, X_train, y_train, scoring=scorings, cv=cv, return_train_score=True, n_jobs=10)
+                    # Save results of each fold
+                    for metric_name in scorings.keys():
+                        scores_df.loc[model_name + f'_{metric_name}']=list(np.around(np.array(scores[f"test_{metric_name}"]),4))
+                    # Generate ROC curves
+                    mean_fpr = np.linspace(0, 1, 100)
+                    tprs, aucs = [], []
+                    # Loop through each fold in the cross-validation
+                    for fold_idx, (train, test) in enumerate(cv.split(X_train, y_train)):
+                        # Fit the model on the training data
+                        model.fit(X_train[train], y_train[train])
+                        # Use RocCurveDisplay to generate the ROC curve
+                        roc_display = RocCurveDisplay.from_estimator(model, X_train[test], y_train[test],
+                                                                     name=f"ROC fold {fold_idx}", alpha=0.3, lw=1, ax=axes[model_idx])
+                        # Interpolate the true positive rates to get a smooth curve
+                        interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
+                        interp_tpr[0] = 0.0
+                        # Append the interpolated TPR and AUC for this fold
+                        tprs.append(interp_tpr)
+                        aucs.append(roc_display.roc_auc)
                 # Plot the diagonal line representing random guessing
                 axes[model_idx].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8)
                 # Compute the mean and standard deviation of the TPRs
@@ -220,19 +221,16 @@ if __name__ == "__main__":
                 mean_tpr[-1] = 1.0
                 mean_auc = auc(mean_fpr, mean_tpr) # Calculate the mean AUC
                 std_auc = np.std(aucs)
                 # Plot the mean ROC curve
                 axes[model_idx].plot(mean_fpr, mean_tpr, color='b',
                                      label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
                                      lw=2, alpha=.8)
                 # Plot the standard deviation of the TPRs
                 std_tpr = np.std(tprs, axis=0)
                 tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                 tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                 axes[model_idx].fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                                              label=r'$\pm$ 1 std. dev.')
                 # Set plot limits and title
                 axes[model_idx].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
                                     title=f"ROC Curve - {model_name} ({group}-{method_names[j]})")
...
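For context, the new block follows scikit-learn's usual recipe for cross-validated ROC curves: plot each fold's curve with RocCurveDisplay.from_estimator, interpolate every fold's TPR onto a shared FPR grid, then average the curves and report the mean AUC with its standard deviation. Below is a minimal, self-contained sketch of that recipe; the synthetic dataset, the DecisionTreeClassifier, and the 5-fold splitter are stand-ins for this repository's data_dic, models dict, and cv object, none of which this commit shows.

    # Minimal sketch of the per-fold ROC pattern in the diff above.
    # The data, model, and splitter here are assumptions standing in for
    # the project's X_train/y_train, models['DT'], and cv.
    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.datasets import make_classification
    from sklearn.model_selection import StratifiedKFold
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.metrics import RocCurveDisplay, auc

    X, y = make_classification(n_samples=500, n_features=20, random_state=42)
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    model = DecisionTreeClassifier(max_depth=3, random_state=42)

    fig, ax = plt.subplots(figsize=(6, 6))
    mean_fpr = np.linspace(0, 1, 100)  # shared FPR grid for interpolation
    tprs, aucs = [], []

    for fold_idx, (train, test) in enumerate(cv.split(X, y)):
        model.fit(X[train], y[train])
        # Plot this fold's ROC curve and keep the display object,
        # which exposes the fold's fpr, tpr, and roc_auc
        roc_display = RocCurveDisplay.from_estimator(
            model, X[test], y[test],
            name=f"ROC fold {fold_idx}", alpha=0.3, lw=1, ax=ax)
        # Interpolate onto the shared grid so folds can be averaged point-wise
        interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
        interp_tpr[0] = 0.0  # pin every interpolated curve to the origin
        tprs.append(interp_tpr)
        aucs.append(roc_display.roc_auc)

    # Diagonal line representing random guessing
    ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8)

    # Mean curve across folds, forced to end at (1, 1)
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    ax.plot(mean_fpr, mean_tpr, color='b',
            label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
            lw=2, alpha=.8)

    # Shaded band of +/- 1 std. dev. around the mean TPR
    std_tpr = np.std(tprs, axis=0)
    ax.fill_between(mean_fpr,
                    np.maximum(mean_tpr - std_tpr, 0),
                    np.minimum(mean_tpr + std_tpr, 1),
                    color='grey', alpha=.2, label=r'$\pm$ 1 std. dev.')
    ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title="ROC Curve - DT")
    ax.legend(loc="lower right")
    plt.show()

Two details carry over directly from the diff: interp_tpr[0] = 0.0 anchors each interpolated curve at (0, 0), and mean_tpr[-1] = 1.0 forces the averaged curve to end at (1, 1), so the mean AUC computed by auc(mean_fpr, mean_tpr) spans the full [0, 1] range.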