train_models.py
"""
    Selecting best models through cross validation and hyperparameter tunning 
    for each method: 
        1. Original training dataset
        2. Original training dataset - Cost sensitive
        3. Oversampling
        4. Undersampling
"""

# Libraries
# --------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
from xgboost import XGBClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score, make_scorer, precision_score, recall_score
from sklearn.model_selection import StratifiedKFold, cross_validate
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# --------------------------------------------------------------------------------------------------------


def negative_recall_scorer(clf, X, y):
    """Negative recall (specificity): (number of true negatives) / (total number of negative samples)"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN_prop = cm[0, 0] / (cm[0, 0] + cm[0, 1])
    return TN_prop

def TN_scorer(clf, X, y):
    """Gives the number of true negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN = cm[0, 0]
    return TN

def FN_scorer(clf, X, y):
    """Gives the number of false negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FN = cm[1, 0]  # false negatives (true label 1, predicted 0) are at cm[1, 0]
    return FN

def FP_scorer(clf, X, y):
    """Gives the number of false positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FP = cm[0, 1]  # false positives (true label 0, predicted 1) are at cm[0, 1]
    return FP

def TP_scorer(clf, X, y):
    """Gives the number of true positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TP = cm[1, 1]
    return TP
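
# Reminder of sklearn's confusion_matrix layout (rows = true labels, columns = predictions):
#     confusion_matrix(y, y_pred) == [[TN, FP],
#                                     [FN, TP]]
# Illustrative check (not part of the pipeline):
#     confusion_matrix([0, 0, 1, 1], [0, 1, 1, 1]) -> [[1, 1], [0, 2]]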

def read_data():
    """Loads the pre/post train and test splits saved as .npy files."""

    # Load test data
    X_test_pre = np.load('gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
    y_test_pre = np.load('gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
    X_test_post = np.load('gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
    y_test_post = np.load('gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)

    # Load ORIGINAL training data
    X_train_pre = np.load('gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)

    # Load oversampled training data
    X_train_over_pre = np.load('gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)

    # Load undersampled training data
    X_train_under_pre = np.load('gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)

    data_dic = {
        "X_test_pre": X_test_pre,
        "y_test_pre": y_test_pre,
        "X_test_post": X_test_post,
        "y_test_post": y_test_post,
        "X_train_pre": X_train_pre,
        "y_train_pre": y_train_pre,
        "X_train_post": X_train_post,
        "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre,
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post,
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre,
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post,
        "y_train_under_post": y_train_under_post,
    }

    return data_dic

if __name__ == "__main__":

    # Reading training data
    data_dic = read_data()

    # Defining the models to train
    # --------------------------------------------------------------------------------------------------------
    # 1. No class weight
    models_1 = {"DT" : DecisionTreeClassifier(), 
            # "RF" : RandomForestClassifier(), 
            # "Bagging" : BaggingClassifier(),
            # "AB" : AdaBoostClassifier(), 
            # "XGB": XGBClassifier(),
            # "LR" : LogisticRegression(), 
            # "ElNet" : LogisticRegression(penalty='elasticnet'), 
            # "SVM" : SVC(), 
            # "MLP" : MLPClassifier(),
            }
    
    # 2. Class weight 
    models_2 = {"DT" : DecisionTreeClassifier(class_weight='balanced'), 
            # "RF" : RandomForestClassifier(class_weight='balanced'), 
            # "Bagging" : BaggingClassifier(), # <-
            # "AB" : AdaBoostClassifier(),  # <-
            # "XGB": XGBClassifier(), # <-
            # "LR" : LogisticRegression(class_weight='balanced'), 
            # "ElNet" : LogisticRegression(penalty='elasticnet', class_weight='balanced'), 
            # "SVM" : SVC(class_weight='balanced'), 
            # "MLP" : MLPClassifier(), # <-
            }
    # --------------------------------------------------------------------------------------------------------

    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Scorings to use for model evaluation
    scorings = {
        'f1': make_scorer(f1_score),
        'negative_recall': negative_recall_scorer,
        'recall': make_scorer(recall_score),
        'precision': make_scorer(precision_score),
        'TN': TN_scorer,
        'FN': FN_scorer,
        'FP': FP_scorer,
        'TP': TP_scorer,
    }
    # Defining cross-validation protocol
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
    # --------------------------------------------------------------------------------------------------------
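
    # NOTE: with return_train_score=True, cross_validate (used below) reports each
    # scorer twice per fold, keyed 'test_<name>' and 'train_<name>' in its results.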

    for i, group in enumerate(['pre', 'post']):
        # Methods: j == 0 original, j == 1 original + class weights (cost-sensitive),
        # j == 2 oversampling, j == 3 undersampling
        for j, method in enumerate(['', '', 'over_', 'under_']):
            # Get dataset based on group and method
            X = data_dic['X_train_' + method + group]
            y = data_dic['y_train_' + method + group]
            # Use the class-weighted models for the cost-sensitive run
            models = models_2 if j == 1 else models_1
            # Create df to keep track of each group-method for all its models
            results = pd.DataFrame()
            for model_name, model in models.items():
                cv_results = cross_validate(model, X, y, scoring=scorings, cv=cv, return_train_score=True, n_jobs=1)
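                # Illustrative sketch (assumption: the rest of the script, not shown
                # here, aggregates the fold scores): average each metric over the 10
                # folds and append one row per model to `results`.
                row = {key: np.mean(vals) for key, vals in cv_results.items()
                       if key.startswith(('test_', 'train_'))}
                row['model'] = model_name
                results = pd.concat([results, pd.DataFrame([row])], ignore_index=True)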