# hyperparam_tuning.py
"""
    Selecting best models through cross validation and hyperparameter tunning 
    for each method: 
        1. Original training dataset
        2. Original training dataset - Cost sensitive
        3. Oversampling
        4. Undersampling
"""

# Libraries
# --------------------------------------------------------------------------------------------------------
import os

import numpy as np
import pandas as pd
from scipy.stats import randint, uniform
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
# --------------------------------------------------------------------------------------------------------

# Function to read training datasets
28 29 30 31
# --------------------------------------------------------------------------------------------------------
def read_data():

    # Load ORIGINAL training data
32 33 34 35
    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)
36 37

    # Load oversampled training data
38 39 40 41
    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)
42 43

    # Load undersampled training data
44 45 46 47
    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)
48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74

    data_dic = {
        "X_train_pre": X_train_pre,
        "y_train_pre": y_train_pre,
        "X_train_post": X_train_post,
        "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre,
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post,
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre,
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post,
        "y_train_under_post": y_train_under_post,
    }

    return data_dic
# --------------------------------------------------------------------------------------------------------

if __name__ == "__main__":

    # Reading training data
    data_dic = read_data()

    # Defining the models to train
    # --------------------------------------------------------------------------------------------------------
    # 1. No class weight
Joaquin Torres's avatar
Joaquin Torres committed
75
    models_simple = {"DT" : DecisionTreeClassifier(), 
76 77 78 79 80 81 82
            "RF" : RandomForestClassifier(), 
            "Bagging" : BaggingClassifier(),
            "AB" : AdaBoostClassifier(algorithm='SAMME'), 
            "XGB": XGBClassifier(),
            "LR" : LogisticRegression(max_iter=1000), 
            "SVM" : SVC(probability=True, max_iter=1000), 
            "MLP" : MLPClassifier(max_iter=500)
Joaquin Torres's avatar
Joaquin Torres committed
83
            # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet')
84 85
            }
    
86
    # 2. Class weight: cost-sensitive learning
Joaquin Torres's avatar
Joaquin Torres committed
87
    models_CS = {"DT" : DecisionTreeClassifier(class_weight='balanced'), 
88 89
            "RF" : RandomForestClassifier(class_weight='balanced'), 
            "Bagging" : BaggingClassifier(estimator= DecisionTreeClassifier(class_weight='balanced')),
90
            "AB" : AdaBoostClassifier(estimator= DecisionTreeClassifier(class_weight='balanced'), algorithm='SAMME'),  
91
            "LR" : LogisticRegression(max_iter=1000, class_weight='balanced'), 
Joaquin Torres's avatar
Joaquin Torres committed
92
            "SVM" : SVC(probability=True, max_iter = 1000, class_weight='balanced'), 
Joaquin Torres's avatar
Joaquin Torres committed
93 94
            # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet', class_weight='balanced'), 
            # "XGB": XGBClassifier(), # <-
95
            # "MLP" : MLPClassifier(max_iter=500) # <-
96
            }
Joaquin Torres's avatar
Joaquin Torres committed
97
    # --------------------------------------------------------------------------------------------------------
98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116

    # Hyperparameter tuning setup
    # --------------------------------------------------------------------------------------------------------
    hyperparameters = {
        "DT": {'splitter': ['best', 'random'], 
            'max_features': ['sqrt', 'log2'], 
            'criterion': ['gini', 'entropy', 'log_loss']},
        "RF": {'n_estimators': randint(100, 250), 
            'max_features': ['sqrt', 'log2'], 
            'criterion': ['gini', 'entropy']},
        "Bagging": {'n_estimators': randint(10, 100), 
                    'max_samples': [0.8, 1.0], 
                    'max_features': [0.8, 1.0], 
                    'warm_start': [True, False]},
        "AB": {'n_estimators': randint(50, 150), 
            'learning_rate': uniform(0.8, 1.2)},
        "XGB": {'n_estimators': randint(100, 1000), 
                'max_depth': randint(3, 10), 
                'learning_rate': uniform(0.01, 0.3)},
117
        "LR": {'penalty': ['l1', 'l2', 'elasticnet', None], 
118 119 120 121 122 123
            'solver': ['lbfgs', 'sag', 'saga']},
        "SVM": {'C': uniform(0.8, 1.2), 
                'kernel': ['linear', 'poly', 'rbf', 'sigmoid']},
        "MLP": {'activation': ['identity', 'logistic', 'tanh', 'relu'], 
                'hidden_layer_sizes': randint(50, 150), 
                'learning_rate': ['constant', 'invscaling', 'adaptive']}
Joaquin Torres's avatar
Joaquin Torres committed
124
        # "ElNet": {'solver': ['lbfgs', 'sag', 'saga']},
125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143
    }
    # --------------------------------------------------------------------------------------------------------

    # Cross-validation setup
    # --------------------------------------------------------------------------------------------------------
    # Defining cross-validation protocol
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    # --------------------------------------------------------------------------------------------------------

    # Hyperparameter tuning loop and exporting results
    # --------------------------------------------------------------------------------------------------------
    # Store each df as a sheet in an excel file
    sheets_dict = {}
144 145
    for i, group in enumerate(['pre', 'post']):
        for j, method in enumerate(['', '', 'over_', 'under_']):
146 147 148 149
            # Get dataset based on group and method
            X = data_dic['X_train_' + method + group]
            y = data_dic['y_train_' + method + group]
            # Use group of models with class weight if needed
Joaquin Torres's avatar
Joaquin Torres committed
150
            models = models_CS if j == 1 else models_simple 
151 152
            # Save results: set of optimal hyperpameters -> mean precision and sd for those parameters across folds
            hyperparam_df = pd.DataFrame(index=list(models.keys()), columns=['Best Parameters','Mean Precision', 'SD'])
153
            for model_name, model in models.items():
Joaquin Torres's avatar
Joaquin Torres committed
154
                print(f"{group}-{method_names[j]}-{model_name}")
155 156
                # Find optimal hyperparams for curr model
                params = hyperparameters[model_name]
Joaquin Torres's avatar
Joaquin Torres committed
157
                search = RandomizedSearchCV(model, param_distributions=params, cv=cv, n_jobs=10, scoring='precision')
158
                search.fit(X,y)
159 160
                # Access the results
                results = search.cv_results_
Joaquin Torres's avatar
Joaquin Torres committed
161
                best_index = search.best_index_
162 163
                # Get sd and mean across folds for best set of hyperpameters
                best_params = search.best_params_
Joaquin Torres's avatar
Joaquin Torres committed
164 165 166
                mean_precision_best = results['mean_test_score'][best_index]
                std_precision_best = results['std_test_score'][best_index]
                # Storing these values
167 168 169
                hyperparam_df.at[model_name, 'Best Parameters'] = best_params
                hyperparam_df.at[model_name, 'Mean Precision'] = round(mean_precision_best, 4)
                hyperparam_df.at[model_name, 'SD'] = round(std_precision_best, 4)
170
            # Store the DataFrame in the dictionary with a unique key for each sheet
Joaquin Torres's avatar
Joaquin Torres committed
171
            sheet_name = f"{group}_{method_names[j]}"
172 173 174
            sheets_dict[sheet_name] = hyperparam_df

    # Write results to Excel file
Joaquin Torres's avatar
Joaquin Torres committed
175
    with pd.ExcelWriter('./output_hyperparam/hyperparamers.xlsx') as writer:
176 177
        for sheet_name, data in sheets_dict.items():
            data.to_excel(writer, sheet_name=sheet_name)
Joaquin Torres's avatar
Joaquin Torres committed
178 179

    print("Successful tuning")
180 181 182
    # --------------------------------------------------------------------------------------------------------