# CV Metric Generation
# Author: Joaquín Torres Bravo
"""
Metric generation for each tuned model.
Done in a different script for performance and clarity purposes.
"""

# Libraries
# --------------------------------------------------------------------------------------------------------
# Basics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Models
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# Metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score, make_scorer, precision_score, recall_score, accuracy_score, roc_auc_score, average_precision_score
from sklearn.metrics import RocCurveDisplay, auc
from sklearn.metrics import PrecisionRecallDisplay, precision_recall_curve
# CV
from sklearn.model_selection import StratifiedKFold
# Misc
import ast  # String to dictionary
# --------------------------------------------------------------------------------------------------------

# Function to read training datasets
# --------------------------------------------------------------------------------------------------------
def read_data():
    # Load ORIGINAL training data
    X_train_pre = np.load('../02-training_data_generation/data/results/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../02-training_data_generation/data/results/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../02-training_data_generation/data/results/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../02-training_data_generation/data/results/post/y_train_post.npy', allow_pickle=True)
    # Load oversampled training data
    X_train_over_pre = np.load('../02-training_data_generation/data/results/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../02-training_data_generation/data/results/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../02-training_data_generation/data/results/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../02-training_data_generation/data/results/post/y_train_over_post.npy', allow_pickle=True)
    # Load undersampled training data
    X_train_under_pre = np.load('../02-training_data_generation/data/results/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../02-training_data_generation/data/results/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../02-training_data_generation/data/results/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../02-training_data_generation/data/results/post/y_train_under_post.npy', allow_pickle=True)

    data_dic = {
        "X_train_pre": X_train_pre,
        "y_train_pre": y_train_pre,
        "X_train_post": X_train_post,
        "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre,
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post,
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre,
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post,
        "y_train_under_post": y_train_under_post,
    }
    return data_dic
# --------------------------------------------------------------------------------------------------------

# Returning tuned models for each situation
# --------------------------------------------------------------------------------------------------------
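# Note on the expected input of get_tuned_models below: each sheet of the hyperparameter workbook is
# assumed to carry the model abbreviation in its first column and a 'Best Parameters' column holding a
# Python dict literal as a string, which ast.literal_eval converts back into keyword arguments.
# Illustrative (hypothetical) cell value: "{'n_estimators': 200, 'max_depth': 5}"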
def get_tuned_models(group_str, method_str):
    # Read the sheet corresponding to this group and method, with the tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("./results_hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}")
    # Mapping from model abbreviations to sklearn model classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }
    tuned_models = {}
    # Iterate through each row of the DataFrame
    for _, row in tuned_models_df.iterrows():
        model_name = row.iloc[0]
        # Read dictionary with parameters
        parameters = ast.literal_eval(row['Best Parameters'])
        # Add extra parameters if needed
        if model_name == 'AB':
            parameters['algorithm'] = 'SAMME'
        elif model_name == 'LR':
            parameters['max_iter'] = 1000
        elif model_name == 'SVM':
            parameters['max_iter'] = 1000
            parameters['probability'] = True
        elif model_name == "MLP":
            parameters['max_iter'] = 500
        # Add class_weight argument for cost-sensitive learning method
        if 'CW' in method_str:
            if model_name == 'Bagging' or model_name == 'AB':
                parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
            else:
                parameters['class_weight'] = 'balanced'
        # Fetch class
        model_class = model_mapping[model_name]
        # Initialize model
        tuned_models[model_name] = model_class(**parameters)
    return tuned_models
# --------------------------------------------------------------------------------------------------------

# Scorers
# --------------------------------------------------------------------------------------------------------
def TN_scorer(clf, X, y):
    """Gives the number of samples predicted as true negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[0, 0]

def FN_scorer(clf, X, y):
    """Gives the number of samples predicted as false negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[1, 0]

def FP_scorer(clf, X, y):
    """Gives the number of samples predicted as false positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[0, 1]

def TP_scorer(clf, X, y):
    """Gives the number of samples predicted as true positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[1, 1]

def negative_recall_scorer(clf, X, y):
    """Gives the negative recall, defined as (number of true negative samples) / (total number of negative samples)"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN_prop = cm[0, 0] / (cm[0, 1] + cm[0, 0])
    return TN_prop

# Custom scorers for AUROC (Area Under the Receiver Operating Characteristic Curve) and AUPRC (Area Under the Precision-Recall Curve)
def AUROC_scorer(clf, X, y):
    # Check if the classifier has a decision_function method
    if hasattr(clf, "decision_function"):
        # If so, use the decision function to get the scores for X
        y_score = clf.decision_function(X)
    else:
        # Otherwise, use predict_proba to get the probabilities, and take the probabilities for the positive class (index 1)
        y_score = clf.predict_proba(X)[:, 1]
    # Compute and return the ROC AUC score using the true labels and the predicted scores
    return roc_auc_score(y, y_score)

def AUPRC_scorer(clf, X, y):
    # Check if the classifier has a decision_function method
    if hasattr(clf, "decision_function"):
        # If so, use the decision function to get the scores for X
        y_score = clf.decision_function(X)
    else:
        # Otherwise, use predict_proba to get the probabilities, and take the probabilities for the positive class (index 1)
        y_score = clf.predict_proba(X)[:, 1]
    # Compute and return the average precision score using the true labels and the predicted scores
    return average_precision_score(y, y_score)
# --------------------------------------------------------------------------------------------------------

if __name__ == "__main__":
    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Reading training data
    data_dic = read_data()
    # Scorings to use for cv metric generation
    scorings = {
        'F1': make_scorer(f1_score),
        'PREC': make_scorer(precision_score),
        'REC': make_scorer(recall_score),
        'ACC': make_scorer(accuracy_score),
        'NREC': negative_recall_scorer,
        'TN': TN_scorer,
        'FN': FN_scorer,
        'FP': FP_scorer,
        'TP': TP_scorer,
        'AUROC': AUROC_scorer,
        'AUPRC': AUPRC_scorer
    }
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    # Defining cross-validation protocol
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
    # Colormap
    cmap = plt.get_cmap('tab10')
    # --------------------------------------------------------------------------------------------------------

    # Metric generation through cv for tuned models
    # --------------------------------------------------------------------------------------------------------
    scores_sheets = {}  # To store score dfs as sheets in the same excel file
    for i, group in enumerate(['pre', 'post']):
        # Note: the first two (empty) entries both select the original training data; j=0 is ORIG and
        # j=1 is ORIG_CW, which differ only in the class-weighted models returned by get_tuned_models
        for j, method in enumerate(['', '', 'over_', 'under_']):
            # Get train dataset based on group and method
            X_train = data_dic['X_train_' + method + group]
            y_train = data_dic['y_train_' + method + group]
            # Get tuned models for this group and method
            models = get_tuned_models(group, method_names[j])
            # Scores df -> one column per cv split, one row per model-metric pair
            scores_df = pd.DataFrame(columns=range(1, 11),
                                     index=[f"{model_name}_{metric_name}"
                                            for model_name in models.keys()
                                            for metric_name in scorings.keys()])
            # Create a figure with 2 subplots (ROC and PR curves) for each model in this group-method
            fig, axes = plt.subplots(len(models), 2, figsize=(10, 8 * len(models)))
            # Metric generation for each model
            for model_idx, (model_name, model) in enumerate(models.items()):
                print(f"{group}-{method_names[j]}-{model_name}")
                # Initialize storage for scores for each fold
                fold_scores = {metric_name: [] for metric_name in scorings.keys()}
                # ROC setup
                mean_fpr = np.linspace(0, 1, 100)
                tprs, aucs = [], []
                # PR setup
                y_real, y_proba = [], []
                # Manually loop through each fold in the cross-validation
                for fold_idx, (train_idx, test_idx) in enumerate(cv.split(X_train, y_train)):
                    X_train_fold, X_test_fold = X_train[train_idx], X_train[test_idx]
                    y_train_fold, y_test_fold = y_train[train_idx], y_train[test_idx]
                    # Fit the model on the training data
                    model.fit(X_train_fold, y_train_fold)
                    # --------------------- SCORINGS ---------------------------
                    # Calculate and store the scores for each metric
                    for metric_name, scorer in scorings.items():
                        score = scorer(model, X_test_fold, y_test_fold)
                        fold_scores[metric_name].append(score)
                    # --------------------- CURVES ---------------------------
                    # ROC generation for current fold
                    roc_display = RocCurveDisplay.from_estimator(model, X_test_fold, y_test_fold,
                                                                 name=f"ROC fold {fold_idx}", alpha=0.6, lw=2,
                                                                 ax=axes[model_idx][0], color=cmap(fold_idx % 10))
                    interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
                    interp_tpr[0] = 0.0
                    tprs.append(interp_tpr)
                    aucs.append(roc_display.roc_auc)
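                    # Each fold produces an ROC curve on its own FPR grid, so its TPR values are
                    # interpolated onto the shared mean_fpr grid above; this lets the per-fold curves be
                    # averaged pointwise into the mean ROC curve after the loop (interp_tpr[0] is pinned
                    # to 0.0 and mean_tpr[-1] to 1.0 so every curve runs from (0, 0) to (1, 1)).
                    # For the PR curve, the true labels and scores of every fold are pooled instead
                    # (y_real, y_proba) and a single pooled curve is drawn after the loop.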
                    # PR curve generation for current fold
                    if hasattr(model, "decision_function"):
                        y_score = model.decision_function(X_test_fold)
                    else:
                        y_score = model.predict_proba(X_test_fold)[:, 1]
                    precision, recall, _ = precision_recall_curve(y_test_fold, y_score)
                    pr_auc = average_precision_score(y_test_fold, y_score)
                    axes[model_idx][1].plot(recall, precision, lw=2, alpha=0.3,
                                            label='PR fold %d (AUPRC = %0.2f)' % (fold_idx, pr_auc))
                    y_real.append(y_test_fold)
                    y_proba.append(y_score)
                # Mean ROC Curve
                mean_tpr = np.mean(tprs, axis=0)
                mean_tpr[-1] = 1.0
                mean_auc = auc(mean_fpr, mean_tpr)
                axes[model_idx][0].plot(mean_fpr, mean_tpr, color='b', lw=4,
                                        label=r'Mean ROC (AUC = %0.2f)' % mean_auc, alpha=.8)
                # Plot diagonal line for random guessing in ROC curve
                axes[model_idx][0].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8, label='Random guessing')
                # Set ROC plot limits and title
                axes[model_idx][0].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
                                       title=f"ROC Curve - {model_name} ({group}-{method_names[j]})")
                axes[model_idx][0].legend(loc="lower right", fontsize='small')
                # Mean PR Curve
                y_real = np.concatenate(y_real)
                y_proba = np.concatenate(y_proba)
                precision, recall, _ = precision_recall_curve(y_real, y_proba)
                axes[model_idx][1].plot(recall, precision, color='b',
                                        label=r'Mean PR (AUPRC = %0.2f)' % (average_precision_score(y_real, y_proba)),
                                        lw=4, alpha=.8)
                # Plot baseline precision (proportion of positive samples)
                baseline = np.sum(y_train) / len(y_train)
                axes[model_idx][1].plot([0, 1], [baseline, baseline], linestyle='--', lw=2, color='r', alpha=.8, label='Baseline')
                # Set Precision-Recall plot limits and title
                axes[model_idx][1].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
                                       title=f"Precision-Recall Curve - {model_name} ({group}-{method_names[j]})")
                axes[model_idx][1].legend(loc="lower left", fontsize='small')
                axes[model_idx][1].set_aspect('equal')  # Set the aspect ratio to be equal
                # Add axis labels
                axes[model_idx][1].set_xlabel('Recall')
                axes[model_idx][1].set_ylabel('Precision')
                # --------------------- END CURVES ---------------------------
                # Store the fold scores in the dataframe
                for metric_name, scores in fold_scores.items():
                    scores_df.loc[f"{model_name}_{metric_name}"] = np.around(scores, 4)
            sheet_name = f"{group}_{method_names[j]}"
            scores_sheets[sheet_name] = scores_df
            # Adjust layout and save figure
            plt.tight_layout()
            plt.savefig(f'./results/cv_metrics/curves/{group}_{method_names[j]}.svg', format='svg', dpi=500)
            plt.close(fig)
    # Write results to Excel file
    with pd.ExcelWriter('./results/cv_metrics/metrics.xlsx') as writer:
        for sheet_name, data in scores_sheets.items():
            data.to_excel(writer, sheet_name=sheet_name)
    print("Successful cv metric generation for tuned models")
# --------------------------------------------------------------------------------------------------------
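
# Convenience helper (a minimal sketch, not used by the script itself): load the generated metrics
# workbook back into a dictionary of DataFrames, one per group-method sheet, for later inspection.
def load_cv_metrics(path='./results/cv_metrics/metrics.xlsx'):
    # sheet_name=None reads every sheet; index_col=0 restores the model_metric row index
    return pd.read_excel(path, sheet_name=None, index_col=0)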