"""
2 3
    Finding optimal hyperparameters through RandomSearchCV for each group (1. pre - 2. post) 
    and method: 
4 5 6 7 8 9 10 11 12 13
        1. Original training dataset
        2. Original training dataset - Cost sensitive
        3. Oversampling
        4. Undersampling
"""

# Libraries
# --------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np

from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from scipy.stats import randint, uniform
from sklearn.model_selection import RandomizedSearchCV
# --------------------------------------------------------------------------------------------------------

# Function to read training datasets
# --------------------------------------------------------------------------------------------------------
def read_data():
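    """Load the original, oversampled and undersampled training splits for the pre and post groups into a dict."""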

    # Load ORIGINAL training data
    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)

    # Load oversampled training data
    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)

    # Load undersampled training data
    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)

    data_dic = {
        "X_train_pre": X_train_pre,
        "y_train_pre": y_train_pre,
        "X_train_post": X_train_post,
        "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre,
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post,
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre,
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post,
        "y_train_under_post": y_train_under_post,
    }

    return data_dic
# --------------------------------------------------------------------------------------------------------

if __name__ == "__main__":

    # Reading training data
    data_dic = read_data()

    # Defining the models to train
    # --------------------------------------------------------------------------------------------------------
    # 1. No class weight
    models_simple = {"DT" : DecisionTreeClassifier(), 
            "RF" : RandomForestClassifier(), 
            "Bagging" : BaggingClassifier(),
            "AB" : AdaBoostClassifier(algorithm='SAMME'), 
            "XGB": XGBClassifier(),
            "LR" : LogisticRegression(max_iter=1000), 
            "SVM" : SVC(probability=True, max_iter=1000), 
            "MLP" : MLPClassifier(max_iter=500)
            }
    
    # 2. Class weight: cost-sensitive learning
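    # (XGB and MLP are omitted here: MLPClassifier exposes no class_weight parameter, and
    # XGBClassifier handles imbalance through scale_pos_weight instead)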
    models_CS = {"DT" : DecisionTreeClassifier(class_weight='balanced'), 
            "RF" : RandomForestClassifier(class_weight='balanced'), 
            "Bagging" : BaggingClassifier(estimator= DecisionTreeClassifier(class_weight='balanced')),
            "AB" : AdaBoostClassifier(estimator= DecisionTreeClassifier(class_weight='balanced'), algorithm='SAMME'),  
            "LR" : LogisticRegression(max_iter=1000, class_weight='balanced'), 
            "SVM" : SVC(probability=True, max_iter = 1000, class_weight='balanced')
            }
    # --------------------------------------------------------------------------------------------------------

    # Hyperparameter tuning setup
    # --------------------------------------------------------------------------------------------------------
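    # Note on the scipy.stats distributions below: uniform(loc, scale) samples from [loc, loc + scale],
    # so e.g. uniform(0.8, 1.2) covers [0.8, 2.0]; randint(low, high) draws integers from [low, high)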
    hyperparameters = {
        "DT": {'splitter': ['best', 'random'], 
            'max_features': ['sqrt', 'log2'], 
            'criterion': ['gini', 'entropy', 'log_loss']},
        "RF": {'n_estimators': randint(100, 250), 
            'max_features': ['sqrt', 'log2'], 
            'criterion': ['gini', 'entropy']},
        "Bagging": {'n_estimators': randint(10, 100), 
                    'max_samples': [0.8, 1.0], 
                    'max_features': [0.8, 1.0], 
                    'warm_start': [True, False]},
        "AB": {'n_estimators': randint(50, 150), 
            'learning_rate': uniform(0.8, 1.2)},
        "XGB": {'n_estimators': randint(100, 1000), 
                'max_depth': randint(3, 10), 
                'learning_rate': uniform(0.01, 0.3)},
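        # Not every penalty/solver pair below is valid (e.g. 'lbfgs' only supports 'l2'/None, and
        # 'elasticnet' requires 'saga' plus an l1_ratio); with the default error_score=np.nan,
        # RandomizedSearchCV records such failed fits as NaN instead of aborting the search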
        "LR": {'penalty': ['l1', 'l2', 'elasticnet', None], 
            'solver': ['lbfgs', 'sag', 'saga']},
        "SVM": {'C': uniform(0.8, 1.2), 
                'kernel': ['linear', 'poly', 'rbf', 'sigmoid']},
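        # a plain int is accepted for hidden_layer_sizes: scikit-learn treats a scalar as a
        # single hidden layer of that width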
        "MLP": {'activation': ['identity', 'logistic', 'tanh', 'relu'], 
                'hidden_layer_sizes': randint(50, 150), 
                'learning_rate': ['constant', 'invscaling', 'adaptive']}
    }
    # --------------------------------------------------------------------------------------------------------

    # Cross-validation setup
    # --------------------------------------------------------------------------------------------------------
    # Defining cross-validation protocol
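    # StratifiedKFold preserves the class ratio in each of the 10 folds; shuffling with a fixed
    # random_state keeps the splits reproducible across runs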
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    # --------------------------------------------------------------------------------------------------------

    # Hyperparameter tuning loop and exporting results
    # --------------------------------------------------------------------------------------------------------
    # Store each df as a sheet in an excel file
    sheets_dict = {}
    for group in ['pre', 'post']:
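        # '' appears twice on purpose: ORIG (j=0) and ORIG_CW (j=1) share the original dataset
        # and differ only in the use of class weights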
        for j, method in enumerate(['', '', 'over_', 'under_']):
            # Get dataset based on group and method
            X = data_dic['X_train_' + method + group]
            y = data_dic['y_train_' + method + group]
            # Use group of models with class weight if needed
            models = models_CS if j == 1 else models_simple 
            # Save optimal hyperparameters for each of the models -> metrics will be computed in a different file
            hyperparam_df = pd.DataFrame(index=list(models.keys()), columns=['Best Parameters'])
            for model_name, model in models.items():
                print(f"{group}-{method_names[j]}-{model_name}")
                # Find optimal hyperparams for curr model
                params = hyperparameters[model_name]
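                # RandomizedSearchCV samples n_iter=10 candidate configurations by default, scores
                # each with 10-fold CV on positive-class precision, and refits the best on all of X, y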
                search = RandomizedSearchCV(model, param_distributions=params, cv=cv, n_jobs=10, scoring='precision')
                search.fit(X, y)
                # Keep optimal parameters
                best_params = search.best_params_
                hyperparam_df.at[model_name, 'Best Parameters'] = best_params
            # Store the DataFrame in the dictionary with a unique key for each sheet
            sheet_name = f"{group}_{method_names[j]}"
            sheets_dict[sheet_name] = hyperparam_df

    # Write results to Excel file
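    # (pandas writes .xlsx files through an Excel engine, typically openpyxl, which must be installed)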
    with pd.ExcelWriter('./output_hyperparam/hyperparamers.xlsx') as writer:
        for sheet_name, data in sheets_dict.items():
            data.to_excel(writer, sheet_name=sheet_name)

    print("Successful tuning")
    # --------------------------------------------------------------------------------------------------------