"""
    Evaluating optimized models with test data
"""

# Libraries
# --------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
from xgboost import XGBClassifier
from sklearn.metrics import (f1_score, make_scorer, precision_score, recall_score, accuracy_score,
                             roc_auc_score, average_precision_score,
                             confusion_matrix, ConfusionMatrixDisplay,
                             RocCurveDisplay, roc_curve,
                             PrecisionRecallDisplay, precision_recall_curve)
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import ast # String to dictionary
# --------------------------------------------------------------------------------------------------------

# Reading data
# --------------------------------------------------------------------------------------------------------
def read_data():
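    """Load the test and training splits (original, oversampled, undersampled) for the
    'pre' and 'post' groups from .npy files and return them in a dictionary keyed as
    'X_test_{group}', 'y_train_{method}{group}', etc.
    """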
    # Load test data
    X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
    y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
    X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
    y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)

    # Load ORIGINAL training data
    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)

    # Load oversampled training data
    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)

    # Load undersampled training data
    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)

    data_dic = {
        "X_test_pre": X_test_pre,
        "y_test_pre": y_test_pre,
        "X_test_post": X_test_post,
        "y_test_post": y_test_post,
        "X_train_pre": X_train_pre,
        "y_train_pre": y_train_pre,
        "X_train_post": X_train_post,
        "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre,
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post,
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre,
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post,
        "y_train_under_post": y_train_under_post,
    }

    return data_dic
# --------------------------------------------------------------------------------------------------------

# Returning tuned models for each situation
# --------------------------------------------------------------------------------------------------------
def get_tuned_models(group_str, method_str):
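    """Rebuild the tuned (unfitted) models for a given group ('pre'/'post') and method
    (e.g. 'ORIG', 'ORIG_CW', 'OVER', 'UNDER') by reading the sheet '{group_str}_{method_str}'
    of the hyperparameter Excel file and instantiating each model class with its stored
    parameters.
    """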

    # Read the sheet for this group and method, containing the tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("./output_hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}")
    # Mapping from model abbreviations to sklearn model classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }
    tuned_models = {}
    # Iterate through each row of the DataFrame
    for index, row in tuned_models_df.iterrows():
        model_name = row.iloc[0]  # Model abbreviation stored in the first column
        # Read dictionary
        parameters = ast.literal_eval(row['Parameters'])
        # Add extra parameters 
        if model_name == 'AB':
            parameters['algorithm'] = 'SAMME'
        elif model_name == 'LR':
            parameters['max_iter'] = 1000
        elif model_name == 'SVM':
            parameters['max_iter'] = 1000
            parameters['probability'] = True
        elif model_name == "MLP":
            parameters['max_iter'] = 500
        # Add class_weight argument for cost-sensitive learning method
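        # (Bagging and AdaBoost expose no class_weight parameter of their own, so the
        # balanced weighting is applied through their base DecisionTreeClassifier instead)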
        if 'CW' in method_str:
            if model_name == 'Bagging' or model_name == 'AB':
                parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
            else:
                parameters['class_weight'] = 'balanced'
        # Fetch class
        model_class = model_mapping[model_name]
        # Initialize model
        tuned_models[model_name] = model_class(**parameters)
    return tuned_models
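# Example: get_tuned_models('pre', 'ORIG_CW') reads sheet 'pre_ORIG_CW' and returns a dict
# of unfitted, cost-sensitive classifiers keyed by abbreviation ('DT', 'RF', ...).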
# --------------------------------------------------------------------------------------------------------

# Scorers
# --------------------------------------------------------------------------------------------------------
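# Note: sklearn's confusion_matrix puts true labels on rows and predicted labels on columns,
# so for binary labels [0, 1] it is laid out as:
#   [[TN, FP],
#    [FN, TP]]
# e.g. confusion_matrix([0, 0, 1, 1], [0, 1, 0, 1]) -> [[1, 1], [1, 1]]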
def TN_scorer(clf, X, y):
    """Gives the number of samples predicted as true negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN = cm[0,0]
    return TN
def FN_scorer(clf, X, y):
    """Gives the number of samples predicted as false negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FN = cm[1,0]
    return FN
def FP_scorer(clf, X, y):
    """Gives the number of samples predicted as false positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FP = cm[0,1]
    return FP
def TP_scorer(clf, X, y):
    """Gives the number of samples predicted as true positive"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TP = cm[1,1]
    return TP

def negative_recall_scorer(clf, X, y):
    """Gives the negative recall (specificity): true negatives / total negatives, i.e. TN / (TN + FP)"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN_prop = cm[0,0]/(cm[0,1]+cm[0,0])
    return TN_prop
# --------------------------------------------------------------------------------------------------------

if __name__ == "__main__":
    # Reading data
    data_dic = read_data()

    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Scorings to use for model evaluation
    scorings = {
        'F1': make_scorer(f1_score),
        'NREC': negative_recall_scorer,
        'REC': make_scorer(recall_score),
        'PREC': make_scorer(precision_score),
        'ACC': make_scorer(accuracy_score),
        'TN': TN_scorer,
        'FN': FN_scorer,
        'FP': FP_scorer,
        'TP': TP_scorer,
        'AUROC': make_scorer(roc_auc_score, needs_threshold=True),           # AUROC requires decision-function or probability outputs
        'AUPRC': make_scorer(average_precision_score, needs_threshold=True)  # AUPRC requires probability outputs
    }
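    # All entries share sklearn's scorer signature scorer(estimator, X, y) -> float, so the
    # custom confusion-matrix callables above can be mixed freely with make_scorer outputs,
    # e.g. scorings['TP'](model, X_test, y_test).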
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
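    # Note: methods 0 (ORIG) and 1 (ORIG_CW) both train on the original data; cost-sensitive
    # learning enters through the class_weight settings applied in get_tuned_models, which is
    # why the method list in the loop below starts with two empty prefixes.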
    # --------------------------------------------------------------------------------------------------------

    # Evaluating performance on the test dataset
    # --------------------------------------------------------------------------------------------------------
    scores_sheets = {} # To store score DataFrames as sheets in the same Excel file
    for i, group in enumerate(['pre', 'post']):
        # Get test dataset based on group
        X_test = data_dic['X_test_' + group]
        y_test = data_dic['y_test_' + group]
        for j, method in enumerate(['', '', 'over_', 'under_']):
            print(f"{group}-{method_names[j]}")
            # Get train dataset based on group and method
            X_train = data_dic['X_train_' + method + group]
            y_train = data_dic['y_train_' + method + group]
            # Get tuned models for this group and method
            models = get_tuned_models(group, method_names[j])
            # Scores df
            scores_df = pd.DataFrame(index=models.keys(), columns=scorings.keys())
            # Create a figure for all models in this group-method
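            # (one row per model; columns: ROC curve, PR curve, confusion matrix)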
            fig, axes = plt.subplots(len(models), 3, figsize=(10, 8 * len(models)))
            if len(models) == 1:  # Adjustment if there's only one model (axes indexing issue)
                axes = [axes]
            # Evaluate each model
            for model_idx, (model_name, model) in enumerate(models.items()):
                # ----------- TEMPORARY -------------
                # Train the model (it was just initialized above)
                model.fit(X_train, y_train)
                if hasattr(model, "decision_function"):
                    y_score = model.decision_function(X_test)
                else:
                    y_score = model.predict_proba(X_test)[:, 1]  # Use probability of positive class
                # Compute ROC curve for the positive class
                fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=model.classes_[1])
                RocCurveDisplay(fpr=fpr, tpr=tpr).plot(ax=axes[model_idx][0])
                # Compute precision-recall curve
                precision, recall, _ = precision_recall_curve(y_test, y_score, pos_label=model.classes_[1])
                PrecisionRecallDisplay(precision=precision, recall=recall).plot(ax=axes[model_idx][1])
                # Get confusion matrix plot
                y_pred = model.predict(X_test)
                cm = confusion_matrix(y_test, y_pred)
                ConfusionMatrixDisplay(cm).plot(ax=axes[model_idx][2])
                # Title each subplot
                axes[model_idx][0].set_title(f'ROC Curve for {model_name}')
                axes[model_idx][1].set_title(f'PR Curve for {model_name}')
                axes[model_idx][2].set_title(f'CM for {model_name}')
                # Evaluate with each of the scores of interest
                for score_name, scorer in scorings.items():
                    score_value = scorer(model, X_test, y_test)
                    scores_df.at[model_name, score_name] = score_value
            # Adjust layout and save the figure
            plt.tight_layout()
            plt.savefig(f'./test_results/aux_plots/{group}_{method_names[j]}.svg', format='svg', dpi=500)
            plt.close(fig)
            # Store the DataFrame in the dictionary with a unique key for each sheet
            sheet_name = f"{group}_{method_names[j]}"
            scores_sheets[sheet_name] = scores_df
    # Write results to Excel file
    with pd.ExcelWriter('./test_results/testing_tuned_models.xlsx') as writer:
        for sheet_name, data in scores_sheets.items():
            data.to_excel(writer, sheet_name=sheet_name)
# --------------------------------------------------------------------------------------------------------