Commit 283ca8df authored by Joaquin Torres

Ready to test new integration with DT

parent f0c96956
@@ -175,8 +175,8 @@ if __name__ == "__main__":
     # Metric generation through cv for tuned models3
     # --------------------------------------------------------------------------------------------------------
     scores_sheets = {} # To store score dfs as sheets in the same excel file
-    for i, group in enumerate(['pre', 'post']): # 'post'
-        for j, method in enumerate(['', 'over_', 'under_']):
+    for i, group in enumerate(['pre']):
+        for j, method in enumerate(['']):
             # Get train dataset based on group and method
             X_train = data_dic['X_train_' + method + group]
             y_train = data_dic['y_train_' + method + group]
@@ -184,18 +184,15 @@ if __name__ == "__main__":
             models = get_tuned_models(group, method_names[j])
             # Scores df -> one column per cv split, one row for each model-metric
             scores_df = pd.DataFrame(columns=range(1,11), index=[f"{model_name}_{metric_name}" for model_name in models.keys() for metric_name in scorings.keys()])
-            # Create a figure for all models in this group-method
+            # Create a figure with 2 subplots (roc and pr curves) for each model in this group-method
             fig, axes = plt.subplots(len(models), 2, figsize=(10, 8 * len(models)))
             if len(models) == 1: # Adjustment if there's only one model (axes indexing issue)
                 axes = [axes]
             # Metric generation for each model
             for model_idx, (model_name, model) in enumerate(models.items()):
                 print(f"{group}-{method_names[j]}-{model_name}")
-                # # Retrieve cv scores for our metrics of interest
-                # scores = cross_validate(model, X_train, y_train, scoring=scorings, cv=cv, return_train_score=True, n_jobs=10)
-                # # Save results of each fold
-                # for metric_name in scorings.keys():
-                #     scores_df.loc[model_name + f'_{metric_name}']=list(np.around(np.array(scores[f"test_{metric_name}"]),4))
+                if model_name == 'DT':
+                    # Curve generation setup
                 mean_fpr = np.linspace(0, 1, 100)
                 tprs, aucs = [], []
                 mean_recall = np.linspace(0, 1, 100)
@@ -203,17 +200,24 @@ if __name__ == "__main__":
                 cmap = plt.get_cmap('tab10') # Colormap
                 # Initialize storage for scores for each fold
                 fold_scores = {metric_name: [] for metric_name in scorings.keys()}
-                # Loop through each fold in the cross-validation
+                # Manually loop through each fold in the cross-validation
                 for fold_idx, (train_idx, test_idx) in enumerate(cv.split(X_train, y_train)):
                     X_train_fold, X_test_fold = X_train[train_idx], X_train[test_idx]
                     y_train_fold, y_test_fold = y_train[train_idx], y_train[test_idx]
                     # Fit the model on the training data
                     model.fit(X_train_fold, y_train_fold)
+                    # --------------------- SCORINGS ---------------------------
                     # Predict on the test data
+                    # Check if the model has a decision_function method
                     if hasattr(model, "decision_function"):
+                        # Use decision_function to get the continuous scores for each test sample
                         y_score = model.decision_function(X_test_fold)
                     else:
-                        y_score = model.predict_proba(X_test_fold)[:, 1] # Use probability of positive class
+                        # If decision_function is not available, use predict_proba to get probabilities
+                        # predict_proba returns an array with probabilities for all classes
+                        # [:, 1] extracts the probability for the positive class (class 1)
+                        y_score = model.predict_proba(X_test_fold)[:, 1]
+                    # Get the predicted class labels for the test data
                     y_pred = model.predict(X_test_fold)
                     # Calculate and store the scores for each metric
                     for metric_name, scorer in scorings.items():
@@ -222,6 +226,7 @@ if __name__ == "__main__":
                         else:
                             score = scorer._score_func(y_test_fold, y_pred)
                         fold_scores[metric_name].append(score)
+                    # --------------------- END SCORINGS ---------------------------
                     # --------------------- CURVES ---------------------------
                     # Generate ROC curve for the fold
                     roc_display = RocCurveDisplay.from_estimator(model, X_test_fold, y_test_fold,
@@ -238,9 +243,6 @@ if __name__ == "__main__":
                     interp_precision = np.interp(mean_recall, pr_display.recall[::-1], pr_display.precision[::-1])
                     precisions.append(interp_precision)
                     pr_aucs.append(pr_display.average_precision)
-                # Store the fold scores in the dataframe
-                for metric_name, scores in fold_scores.items():
-                    scores_df.loc[f"{model_name}_{metric_name}"] = np.around(scores, 4)
                 # Plot diagonal line for random guessing in ROC curve
                 axes[model_idx][0].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8, label='Random guessing')
                 # Compute mean ROC curve
@@ -261,6 +263,10 @@ if __name__ == "__main__":
                 # Set Precision-Recall plot limits and title
                 axes[model_idx][1].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title=f"Precision-Recall Curve - {model_name} ({group}-{method_names[j]})")
                 axes[model_idx][1].legend(loc="lower right")
+                # --------------------- END CURVES ---------------------------
+                # Store the fold scores in the dataframe
+                for metric_name, scores in fold_scores.items():
+                    scores_df.loc[f"{model_name}_{metric_name}"] = np.around(scores, 4)
             # Store the DataFrame in the dictionary with a unique key for each sheet
             sheet_name = f"{group}_{method_names[j]}"
             scores_sheets[sheet_name] = scores_df
@@ -273,3 +279,4 @@ if __name__ == "__main__":
         for sheet_name, data in scores_sheets.items():
             data.to_excel(writer, sheet_name=sheet_name)
     print("Successful cv metric generation for tuned models")
+    # --------------------------------------------------------------------------------------------------------
\ No newline at end of file
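
Note: below is a minimal, standalone sketch of the manual per-fold pattern this commit switches to, restricted to a decision tree as in the new 'DT' branch. It assumes scikit-learn with toy data from make_classification; names such as clf, X, and y are illustrative and not taken from this repository.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, average_precision_score

# Toy data standing in for the project's training split
X, y = make_classification(n_samples=500, n_features=20, random_state=0)
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
clf = DecisionTreeClassifier(random_state=0)

fold_roc_auc, fold_ap = [], []
for train_idx, test_idx in cv.split(X, y):
    # Fit on the fold's training portion
    clf.fit(X[train_idx], y[train_idx])
    # Decision trees expose predict_proba but not decision_function,
    # so keep the positive-class probability column, as in the commit
    y_score = clf.predict_proba(X[test_idx])[:, 1]
    fold_roc_auc.append(roc_auc_score(y[test_idx], y_score))
    fold_ap.append(average_precision_score(y[test_idx], y_score))

# One value per fold, rounded like the scores_df rows in the script
print(np.round(fold_roc_auc, 4))
print(np.round(fold_ap, 4))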