# Testing Tuned Models
# Author: Joaquín Torres Bravo
"""
    Evaluating optimized models with test data
"""

# Libraries
# --------------------------------------------------------------------------------------------------------
# Basics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Models
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# Metrics
from sklearn.metrics import f1_score, make_scorer, precision_score, recall_score, accuracy_score, roc_auc_score, average_precision_score
from sklearn.metrics import RocCurveDisplay, roc_curve
from sklearn.metrics import PrecisionRecallDisplay, precision_recall_curve
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
# Misc
import ast # String to dictionary
from mpl_toolkits.axes_grid1 import make_axes_locatable # Custom color bar for confusion matrices
# --------------------------------------------------------------------------------------------------------

# Reading data
# --------------------------------------------------------------------------------------------------------
def read_data():
    # Load test data
    X_test_pre = np.load('../02-training_data_generation/data/results/pre/X_test_pre.npy', allow_pickle=True)
    y_test_pre = np.load('../02-training_data_generation/data/results/pre/y_test_pre.npy', allow_pickle=True)
    X_test_post = np.load('../02-training_data_generation/data/results/post/X_test_post.npy', allow_pickle=True)
    y_test_post = np.load('../02-training_data_generation/data/results/post/y_test_post.npy', allow_pickle=True)

    # Load original (unresampled) training data
    X_train_pre = np.load('../02-training_data_generation/data/results/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../02-training_data_generation/data/results/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../02-training_data_generation/data/results/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../02-training_data_generation/data/results/post/y_train_post.npy', allow_pickle=True)

    # Load oversampled training data
    X_train_over_pre = np.load('../02-training_data_generation/data/results/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../02-training_data_generation/data/results/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../02-training_data_generation/data/results/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../02-training_data_generation/data/results/post/y_train_over_post.npy', allow_pickle=True)

    # Load undersampled training data
    X_train_under_pre = np.load('../02-training_data_generation/data/results/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../02-training_data_generation/data/results/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../02-training_data_generation/data/results/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../02-training_data_generation/data/results/post/y_train_under_post.npy', allow_pickle=True)

    data_dic = {
        "X_test_pre": X_test_pre,
        "y_test_pre": y_test_pre,
        "X_test_post": X_test_post,
        "y_test_post": y_test_post,
        "X_train_pre": X_train_pre,
        "y_train_pre": y_train_pre,
        "X_train_post": X_train_post,
        "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre,
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post,
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre,
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post,
        "y_train_under_post": y_train_under_post,
    }

    return data_dic
# --------------------------------------------------------------------------------------------------------
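
# A minimal sanity-check sketch; the helper name is illustrative, and the check
# only assumes each X_* feature matrix is row-aligned with its y_* label vector.
def check_loaded_data(data_dic):
    for name, arr in data_dic.items():
        print(f"{name}: shape={arr.shape}, dtype={arr.dtype}")
        if name.startswith('X_'):
            y_name = 'y' + name[1:]
            assert len(arr) == len(data_dic[y_name]), f"Row mismatch for {name}"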

# Returning tuned models for each situation
# --------------------------------------------------------------------------------------------------------
def get_tuned_models(group_str, method_str):

    # Read the sheet for this group and method, containing the tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("./results/hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}")
    # Mapping from model abbreviations to classifier classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }
    tuned_models = {}
    # Iterate through each row of the DataFrame
    for _, row in tuned_models_df.iterrows():
        model_name = row.iloc[0]
        # Parse the stored hyperparameter string into a dictionary
        parameters = ast.literal_eval(row['Best Parameters'])
        # Add extra parameters 
        if model_name == 'AB':
            parameters['algorithm'] = 'SAMME'
        elif model_name == 'LR':
            parameters['max_iter'] = 1000
        elif model_name == 'SVM':
            parameters['max_iter'] = 1000
            parameters['probability'] = True
        elif model_name == "MLP":
            parameters['max_iter'] = 500
        # Add class_weight argument for cost-sensitive learning method
        if 'CW' in method_str:
            if model_name == 'Bagging' or model_name == 'AB':
                parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
            else:
                parameters['class_weight'] = 'balanced'
        # Fetch class
        model_class = model_mapping[model_name]
        # Initialize model
        tuned_models[model_name] = model_class(**parameters)
    return tuned_models
# --------------------------------------------------------------------------------------------------------
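
# Sheet layout assumed by get_tuned_models (values below are hypothetical): the
# first column holds the model abbreviation and a 'Best Parameters' column holds
# a dict literal, e.g.
#
#   Model   Best Parameters
#   RF      {'n_estimators': 200, 'max_depth': 10}
#   SVM     {'C': 1.0, 'kernel': 'rbf'}
#
# which ast.literal_eval parses back into a Python dict:
#   ast.literal_eval("{'n_estimators': 200, 'max_depth': 10}")  # -> {'n_estimators': 200, 'max_depth': 10}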

# Scorers
# --------------------------------------------------------------------------------------------------------
def TN_scorer(clf, X, y):
    """Gives the number of samples predicted as true negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[0, 0]

def FN_scorer(clf, X, y):
    """Gives the number of samples predicted as false negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[1, 0]

def FP_scorer(clf, X, y):
    """Gives the number of samples predicted as false positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[0, 1]

def TP_scorer(clf, X, y):
    """Gives the number of samples predicted as true positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    return cm[1, 1]

def negative_recall_scorer(clf, X, y):
    """Gives the negative recall defined as the (number of true_negative_samples)/(total number of negative samples)"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN_prop = cm[0,0]/(cm[0,1]+cm[0,0])
    return TN_prop
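
# Index convention behind the scorers above: for binary labels {0, 1}, sklearn's
# confusion_matrix returns
#   cm = [[TN, FP],
#         [FN, TP]]
# so cm[0, 0] counts true negatives, cm[0, 1] false positives, cm[1, 0] false
# negatives, and cm[1, 1] true positives.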

# Custom scorers for AUROC and AUPRC
def AUROC_scorer(clf, X, y):
    if hasattr(clf, "decision_function"):
        y_score = clf.decision_function(X)
    else:
        y_score = clf.predict_proba(X)[:, 1]
    return roc_auc_score(y, y_score)

def AUPRC_scorer(clf, X, y):
    if hasattr(clf, "decision_function"):
        y_score = clf.decision_function(X)
    else:
        y_score = clf.predict_proba(X)[:, 1]
    return average_precision_score(y, y_score)
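
# Both scorers prefer decision_function when the model exposes one (e.g. SVC):
# AUROC and AUPRC are ranking metrics, so any monotone score suffices and no
# calibrated probabilities are needed; predict_proba is the fallback otherwise.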
# --------------------------------------------------------------------------------------------------------

if __name__ == "__main__":
    # Reading data
    data_dic = read_data()

    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Scorings to use for model evaluation
    scorings = {
        'F1': make_scorer(f1_score),
        'NREC': negative_recall_scorer,
        'REC': make_scorer(recall_score),
        'PREC': make_scorer(precision_score),
        'ACC': make_scorer(accuracy_score),
        'TN': TN_scorer,
        'FN': FN_scorer,
        'FP': FP_scorer,
        'TP': TP_scorer,
        'AUROC': AUROC_scorer,
        'AUPRC': AUPRC_scorer
    }
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    # --------------------------------------------------------------------------------------------------------

    # Evaluating performance using test dataset
    # --------------------------------------------------------------------------------------------------------
    scores_sheets = {} # To store score dfs as sheets in the same excel file
    for i, group in enumerate(['pre', 'post']):
        # Get test dataset based on group
        X_test = data_dic['X_test_' + group]
        y_test = data_dic['y_test_' + group]
        # ORIG and ORIG_CW both train on the original data, hence the repeated ''
        for j, method in enumerate(['', '', 'over_', 'under_']):
            # Get train dataset based on group and method
            X_train = data_dic['X_train_' + method + group]
            y_train = data_dic['y_train_' + method + group]
            # Get tuned models for this group and method
            models = get_tuned_models(group, method_names[j])
            # Scores df
            scores_df = pd.DataFrame(index=models.keys(), columns=scorings.keys())
            # Create one figure per group-method pair: a row per model, with ROC curve, PR curve and confusion matrix columns
            fig, axes = plt.subplots(len(models), 3, figsize=(10, 8 * len(models)))
            # Evaluate each model with test dataset
            for model_idx, (model_name, model) in enumerate(models.items()):
                print(f"{group}-{method_names[j]}-{model_name}")
                # Fit the model on the training data
                model.fit(X_train, y_train)
                # --------------------- SCORINGS ---------------------------
                # Calculate and store the scores for each metric
                for metric_name, scorer in scorings.items():
                    score = scorer(model, X_test, y_test)
                    scores_df.at[model_name, metric_name] = round(score, 4)
                # -----------------------------------------------------------
                # --------------------- PLOTS ---------------------------
                # Check if the model has a decision_function method
                if hasattr(model, "decision_function"):
                    # Use the decision function to get scores
                    y_score = model.decision_function(X_test)
                else:
                    # Otherwise, use the probability estimates and take the probability of the positive class
                    y_score = model.predict_proba(X_test)[:, 1]
                # Compute the ROC curve for the positive class
                fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=model.classes_[1])
                # Plot the ROC curve with thicker line
                roc_display = RocCurveDisplay(fpr=fpr, tpr=tpr)
                roc_display.plot(ax=axes[model_idx][0], lw=2)
                # Plot the diagonal line for the ROC curve
                axes[model_idx][0].plot([0, 1], [0, 1], 'k--', lw=2, label='Random Classifier')
                axes[model_idx][0].set_title(f'ROC Curve for {group}-{method_names[j]}-{model_name}')
                axes[model_idx][0].set_xlabel('False Positive Rate')
                axes[model_idx][0].set_ylabel('True Positive Rate')
                axes[model_idx][0].legend(loc='lower right')
                # Calculate precision-recall curve
                precision, recall, _ = precision_recall_curve(y_test, y_score, pos_label=model.classes_[1])
                # Plot the precision-recall curve with thicker line
                pr_display = PrecisionRecallDisplay(precision=precision, recall=recall)
                pr_display.plot(ax=axes[model_idx][1], lw=2)
                # Plot the baseline for the PR curve
                no_skill = len(y_test[y_test == 1]) / len(y_test)
                axes[model_idx][1].plot([0, 1], [no_skill, no_skill], 'k--', lw=2, label='No Skill')
                axes[model_idx][1].set_title(f'PR Curve for {group}-{method_names[j]}-{model_name}')
                axes[model_idx][1].set_xlabel('Recall')
                axes[model_idx][1].set_ylabel('Precision')
                axes[model_idx][1].legend(loc='lower left')
                # Predict on the test data to obtain the confusion matrix
                y_pred = model.predict(X_test)
                # Compute confusion matrix
                cm = confusion_matrix(y_test, y_pred)
                # Plot the confusion matrix
                cmp = ConfusionMatrixDisplay(cm)
                # Deactivate default colorbar
                cmp.plot(ax=axes[model_idx][2], colorbar=False, cmap=sns.color_palette("light:b", as_cmap=True))

                # Adding custom colorbar using make_axes_locatable
                divider = make_axes_locatable(axes[model_idx][2])
                cax = divider.append_axes("right", size="5%", pad=0.05)
                plt.colorbar(cmp.im_, cax=cax)
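                # (a fixed-width appended cax keeps all three panels the same
                # size; the default colorbar placement would shrink this axis)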

                axes[model_idx][2].set_title(f'CM for {group}-{method_names[j]}-{model_name}')
                axes[model_idx][2].set_xlabel('Predicted label')
                axes[model_idx][2].set_ylabel('True label')
                # ----------------------------------------------------------
            # Adjust layout and save figure
            plt.tight_layout()
            plt.savefig(f'./results/testing/plots/{group}_{method_names[j]}.svg', format='svg', dpi=500)
            plt.close(fig)
            # Store the DataFrame in the dictionary with a unique key for each sheet
            sheet_name = f"{group}_{method_names[j]}"
            scores_sheets[sheet_name] = scores_df
    # Write results to Excel file
    with pd.ExcelWriter('./results/testing/testing_tuned_models.xlsx') as writer:
        for sheet_name, data in scores_sheets.items():
            data.to_excel(writer, sheet_name=sheet_name)
    print("Successful evaluation with test dataset")
# --------------------------------------------------------------------------------------------------------