"""
    Metric generation for each tuned model.
    Done in a different script for perfomance and clarity purposes.
"""

# Libraries
# --------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
from xgboost import XGBClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score, make_scorer, precision_score, recall_score, accuracy_score, roc_auc_score, average_precision_score
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedKFold, cross_validate
from sklearn.metrics import RocCurveDisplay, auc
from sklearn.metrics import PrecisionRecallDisplay, precision_recall_curve
import matplotlib.pyplot as plt
import ast  # Parse hyperparameter strings into dictionaries
# --------------------------------------------------------------------------------------------------------

# Function to read training datasets
# --------------------------------------------------------------------------------------------------------
def read_data():
    """Load the original, oversampled, and undersampled training datasets."""
    # Load ORIGINAL training data
    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)

    # Load oversampled training data
    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)

    # Load undersampled training data
    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)

    data_dic = {
        "X_train_pre": X_train_pre,
        "y_train_pre": y_train_pre,
        "X_train_post": X_train_post,
        "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre,
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post,
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre,
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post,
        "y_train_under_post": y_train_under_post,
    }

    return data_dic
# --------------------------------------------------------------------------------------------------------
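
# Illustrative usage of read_data (a sketch; assumes the .npy files above exist):
#   data_dic = read_data()
#   X, y = data_dic["X_train_pre"], data_dic["y_train_pre"]
#   assert X.shape[0] == y.shape[0]  # each feature matrix pairs with its label vector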

# Return the tuned models for a given group (pre/post) and method
# --------------------------------------------------------------------------------------------------------
def get_tuned_models(group_str, method_str):
    # Read the sheet corresponding to this group and method, containing the tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("./output_hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}")
    # Mapping from model abbreviations to sklearn model classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }
    tuned_models = {}
    # Iterate through each row of the DataFrame
    for _, row in tuned_models_df.iterrows():
        model_name = row.iloc[0]  # Model abbreviation stored in the first column
        # Parse the stored hyperparameter string into a dictionary
        parameters = ast.literal_eval(row['Best Parameters'])
        # Add extra parameters 
        if model_name == 'AB':
            parameters['algorithm'] = 'SAMME'
        elif model_name == 'LR':
            parameters['max_iter'] = 1000
        elif model_name == 'SVM':
            parameters['max_iter'] = 1000
            parameters['probability'] = True
        elif model_name == "MLP":
            parameters['max_iter'] = 500
        # Add class_weight argument for cost-sensitive learning method
        if 'CW' in method_str:
            if model_name in ('Bagging', 'AB'):
                parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
            else:
                parameters['class_weight'] = 'balanced'
        # Fetch class
        model_class = model_mapping[model_name]
        # Initialize model
        tuned_models[model_name] = model_class(**parameters)
    return tuned_models
# --------------------------------------------------------------------------------------------------------
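
# Illustrative sheet row expected by get_tuned_models (hypothetical values; the first
# column holds the model abbreviation, 'Best Parameters' a dict-like string):
#   DT    "{'max_depth': 5, 'criterion': 'gini'}"
# ast.literal_eval then parses the string safely, e.g.:
#   ast.literal_eval("{'max_depth': 5}")  ->  {'max_depth': 5}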

# Scorers
# --------------------------------------------------------------------------------------------------------
def TN_scorer(clf, X, y):
    """Gives the number of samples predicted as true negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN = cm[0,0]
    return TN
def FN_scorer(clf, X, y):
    """Gives the number of samples predicted as false negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FN = cm[1,0]
    return FN
def FP_scorer(clf, X, y):
    """Gives the number of samples predicted as false positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FP = cm[0,1]
    return FP
def TP_scorer(clf, X, y):
    """Gives the number of samples predicted as true positive"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TP = cm[1,1]
    return TP

def negative_recall_scorer(clf, X, y):
    """Gives the negative recall defined as the (number of true_negative_samples)/(total number of negative samples)"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN_prop = cm[0,0]/(cm[0,1]+cm[0,0])
    return TN_prop
# --------------------------------------------------------------------------------------------------------
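
# Reference for the scorers above: sklearn's confusion_matrix uses cm[i, j] = number of
# samples with true class i predicted as class j, so for binary labels:
#   cm = [[TN, FP],
#         [FN, TP]]
# Minimal check with hypothetical labels: confusion_matrix([0, 0, 1, 1], [0, 1, 0, 1])
# gives [[1, 1], [1, 1]] -> one each of TN, FP, FN, TP.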

if __name__ == "__main__":

    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Reading training data
    data_dic = read_data()
    # Scorings to use for cv metric generation
    scorings = {
        'F1': make_scorer(f1_score),
        'PREC': make_scorer(precision_score),
        'REC': make_scorer(recall_score),
        'ACC': make_scorer(accuracy_score),
        'NREC': negative_recall_scorer,
        'TN': TN_scorer,
        'FN': FN_scorer,
        'FP': FP_scorer,
        'TP': TP_scorer,
        'AUROC': make_scorer(roc_auc_score),
        'AUPRC': make_scorer(average_precision_score)
    }
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    # Defining cross-validation protocol
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=1) 
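    # The stratified protocol keeps the class ratio in every fold; a sketch with
    # hypothetical labels (not part of this script):
    #   y_demo = np.array([0] * 90 + [1] * 10)
    #   for tr_idx, te_idx in cv.split(np.zeros((100, 1)), y_demo):
    #       ...  # each test fold holds ~9 negatives and ~1 positive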
    # --------------------------------------------------------------------------------------------------------

    # Metric generation through CV for tuned models
    # --------------------------------------------------------------------------------------------------------
    scores_sheets = {} # To store score dfs as sheets in the same excel file
    for group in ['pre', 'post']:
        # ORIG and ORIG_CW both train on the original data; class weights are applied inside get_tuned_models
        for j, method in enumerate(['', '', 'over_', 'under_']):
            # Get train dataset based on group and method
            X_train = data_dic['X_train_' + method + group]
            y_train = data_dic['y_train_' + method + group]
            # Get tuned models for this group and method
            models = get_tuned_models(group, method_names[j])
            # Scores df -> one column per cv split, one row for each model-metric
            scores_df = pd.DataFrame(columns=range(1,11), index=[f"{model_name}_{metric_name}" for model_name in models.keys() for metric_name in scorings.keys()])
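            # e.g. row "RF_F1" will hold RF's F1 score on each of the 10 folds (columns 1-10)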
            # Create a figure for all models in this group-method
            fig, axes = plt.subplots(len(models), 2, figsize=(10, 8 * len(models)))
            if len(models) == 1:  # Adjustment if there's only one model (axes indexing issue)
                axes = [axes]
            # Metric generation for each model
            for model_idx, (model_name, model) in enumerate(models.items()):
                print(f"{group}-{method_names[j]}-{model_name}")
                # NOTE: cross_validate could compute these metrics in one call, but the folds
                # are iterated manually below so that per-fold ROC and PR curves can be plotted.
                mean_fpr = np.linspace(0, 1, 100)
                tprs, aucs = [], []
                mean_recall = np.linspace(0, 1, 100)
                precisions, pr_aucs = [], []
                cmap = plt.get_cmap('tab10')  # Colormap
                # Initialize storage for scores for each fold
                fold_scores = {metric_name: [] for metric_name in scorings.keys()}
                # Loop through each fold in the cross-validation
                for fold_idx, (train_idx, test_idx) in enumerate(cv.split(X_train, y_train)):
                    X_train_fold, X_test_fold = X_train[train_idx], X_train[test_idx]
                    y_train_fold, y_test_fold = y_train[train_idx], y_train[test_idx]
                    # Fit the model on the training data
                    model.fit(X_train_fold, y_train_fold)
                    # Get continuous scores for ROC/PR metrics: prefer decision_function,
                    # otherwise fall back to the predicted probability of the positive class
                    if hasattr(model, "decision_function"):
                        y_score = model.decision_function(X_test_fold)
                    else:
                        y_score = model.predict_proba(X_test_fold)[:, 1]
                    y_pred = model.predict(X_test_fold)
                    # Calculate and store the scores for each metric
                    for metric_name, scorer in scorings.items():
                        if metric_name in ['AUROC', 'AUPRC']:
                            # Curve-based metrics need continuous scores, not hard predictions
                            score = scorer._score_func(y_test_fold, y_score)
                        elif metric_name in ['NREC', 'TN', 'FN', 'FP', 'TP']:
                            # Custom scorers are plain callables with signature (clf, X, y)
                            score = scorer(model, X_test_fold, y_test_fold)
                        else:
                            score = scorer._score_func(y_test_fold, y_pred)
                        fold_scores[metric_name].append(score)
                    # --------------------- CURVES ---------------------------
                    # Generate ROC curve for the fold
                    roc_display = RocCurveDisplay.from_estimator(model, X_test_fold, y_test_fold,
                                                                name=f"ROC fold {fold_idx}", alpha=0.6, lw=2,
                                                                ax=axes[model_idx][0], color=cmap(fold_idx % 10))
                    interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
                    interp_tpr[0] = 0.0
                    tprs.append(interp_tpr)
                    aucs.append(roc_display.roc_auc)
                    # Generate Precision-Recall curve for the fold
                    pr_display = PrecisionRecallDisplay.from_estimator(model, X_test_fold, y_test_fold,
                                                                    name=f"PR fold {fold_idx}", alpha=0.6, lw=2,
                                                                    ax=axes[model_idx][1], color=cmap(fold_idx % 10))
                    # Reverse so recall is increasing, as np.interp requires
                    interp_precision = np.interp(mean_recall, pr_display.recall[::-1], pr_display.precision[::-1])
                    precisions.append(interp_precision)
                    pr_aucs.append(pr_display.average_precision)
                # Store the fold scores in the dataframe
                for metric_name, scores in fold_scores.items():
                    scores_df.loc[f"{model_name}_{metric_name}"] = np.around(scores, 4)
                # Plot diagonal line for random guessing in ROC curve
                axes[model_idx][0].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8, label='Random guessing')
                # Compute mean ROC curve
                mean_tpr = np.mean(tprs, axis=0)
                mean_tpr[-1] = 1.0
                mean_auc = auc(mean_fpr, mean_tpr)
                axes[model_idx][0].plot(mean_fpr, mean_tpr, color='b', lw=4, label=r'Mean ROC (AUC = %0.2f)' % mean_auc, alpha=.8)
                # Set ROC plot limits and title
                axes[model_idx][0].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title=f"ROC Curve - {model_name} ({group}-{method_names[j]})")
                axes[model_idx][0].legend(loc="lower right")
                # Compute mean Precision-Recall curve
                mean_precision = np.mean(precisions, axis=0)
                mean_pr_auc = np.mean(pr_aucs)
                axes[model_idx][1].plot(mean_recall, mean_precision, color='b', lw=4, label=r'Mean PR (AUC = %0.2f)' % mean_pr_auc, alpha=.8)
                # Plot baseline precision (proportion of positive samples)
                baseline = np.sum(y_train) / len(y_train)
                axes[model_idx][1].plot([0, 1], [baseline, baseline], linestyle='--', lw=2, color='r', alpha=.8, label='Baseline')
                # Set Precision-Recall plot limits and title
                axes[model_idx][1].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title=f"Precision-Recall Curve - {model_name} ({group}-{method_names[j]})")
                axes[model_idx][1].legend(loc="lower right")
            # Store the DataFrame in the dictionary with a unique key for each sheet
            sheet_name = f"{group}_{method_names[j]}"
            scores_sheets[sheet_name] = scores_df
            # Adjust layout and save figure
            plt.tight_layout()
            plt.savefig(f'./output_cv_metrics/curves/{group}_{method_names[j]}.svg', format='svg', dpi=500)
            plt.close(fig)
    # Write results to Excel file
    with pd.ExcelWriter('./output_cv_metrics/metrics.xlsx') as writer:
        for sheet_name, data in scores_sheets.items():
            data.to_excel(writer, sheet_name=sheet_name)
    print("Successful cv metric generation for tuned models")