From 51eae1a78453f1afcaccf7f73655ed4146b65b2f Mon Sep 17 00:00:00 2001
From: Joaquin Torres Bravo
Date: Wed, 8 May 2024 13:02:32 +0200
Subject: [PATCH] fixing small details, getting ready to run script

---
 training_models/hyperparam_tuning.py | 70 ++++++++++++++--------------
 1 file changed, 36 insertions(+), 34 deletions(-)

diff --git a/training_models/hyperparam_tuning.py b/training_models/hyperparam_tuning.py
index 05e0bcd..e3e6238 100644
--- a/training_models/hyperparam_tuning.py
+++ b/training_models/hyperparam_tuning.py
@@ -30,28 +30,28 @@ def read_data():
     import numpy as np
 
     # Load test data
-    X_test_pre = np.load('./gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
-    y_test_pre = np.load('./gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
-    X_test_post = np.load('./gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
-    y_test_post = np.load('./gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)
+    X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
+    y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
+    X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
+    y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)
 
     # Load ORIGINAL training data
-    X_train_pre = np.load('./gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
-    y_train_pre = np.load('./gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
-    X_train_post = np.load('./gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
-    y_train_post = np.load('./gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)
+    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
+    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
+    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
+    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)
 
     # Load oversampled training data
-    X_train_over_pre = np.load('./gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
-    y_train_over_pre = np.load('./gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
-    X_train_over_post = np.load('./gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
-    y_train_over_post = np.load('./gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)
+    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
+    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
+    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
+    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)
 
     # Load undersampled training data
-    X_train_under_pre = np.load('./gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
-    y_train_under_pre = np.load('./gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
-    X_train_under_post = np.load('./gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
-    y_train_under_post = np.load('./gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)
+    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
+    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
+    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
+    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)
 
     data_dic = {
         "X_test_pre": X_test_pre,
@@ -84,26 +84,26 @@ if __name__ == "__main__":
     # --------------------------------------------------------------------------------------------------------
     # 1. No class weight
     models_1 = {"DT" : DecisionTreeClassifier(),
-                # "RF" : RandomForestClassifier(n_estimators=50),
-                # "Bagging" : BaggingClassifier(),
-                # "AB" : AdaBoostClassifier(),
-                # "XGB": XGBClassifier(),
-                # "LR" : LogisticRegression(max_iter=1000),
-                # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                # "SVM" : SVC(probability=True),
-                # "MLP" : MLPClassifier(max_iter=500),
+                "RF" : RandomForestClassifier(),
+                "Bagging" : BaggingClassifier(),
+                "AB" : AdaBoostClassifier(),
+                "XGB": XGBClassifier(),
+                "LR" : LogisticRegression(max_iter=1000),
+                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet', solver='saga'),
+                "SVM" : SVC(probability=True),
+                "MLP" : MLPClassifier(max_iter=500)
                 }
 
-    # 2. Class weight
+    # 2. Class weight: cost-sensitive learning
     models_2 = {"DT" : DecisionTreeClassifier(class_weight='balanced'),
-                # "RF" : RandomForestClassifier(n_estimators=50, class_weight='balanced'),
-                # "Bagging" : BaggingClassifier(), # <-
-                # "AB" : AdaBoostClassifier(), # <-
+                "RF" : RandomForestClassifier(class_weight='balanced'),
+                "Bagging" : BaggingClassifier(estimator=DecisionTreeClassifier(class_weight='balanced')),
+                "AB" : AdaBoostClassifier(estimator=DecisionTreeClassifier(class_weight='balanced')),
                 # "XGB": XGBClassifier(), # <-
-                # "LR" : LogisticRegression(max_iter=1000, class_weight='balanced'),
-                # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet', class_weight='balanced'),
-                # "SVM" : SVC(probability=True, class_weight='balanced'),
-                # "MLP" : MLPClassifier(max_iter=500), # <-
+                "LR" : LogisticRegression(max_iter=1000, class_weight='balanced'),
+                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet', solver='saga', class_weight='balanced'),
+                "SVM" : SVC(probability=True, class_weight='balanced'),
+                # "MLP" : MLPClassifier(max_iter=500)
                 }
 
     # Hyperparameter tuning setup
@@ -152,8 +152,9 @@ if __name__ == "__main__":
     # Store each df as a sheet in an Excel file
     sheets_dict = {}
     for i, group in enumerate(['pre', 'post']):
+        print(group, end=' ')
         for j, method in enumerate(['', '', 'over_', 'under_']):
-            print(f"ITERATION {i+j}")
+            print(method, end=' ')
             # Get dataset based on group and method
             X = data_dic['X_train_' + method + group]
             y = data_dic['y_train_' + method + group]
@@ -162,6 +163,7 @@ if __name__ == "__main__":
             # Save results: params and best score for each of the models of this method and group
             hyperparam_df = pd.DataFrame(index=list(models.keys()), columns=['Parameters','Score'])
             for model_name, model in models.items():
+                print(model_name + "\n\n")
                 # Find optimal hyperparams for curr model
                 params = hyperparameters[model_name]
                 search = RandomizedSearchCV(model, param_distributions=params, cv=cv, n_jobs=1, scoring='precision')
@@ -174,7 +176,7 @@ if __name__ == "__main__":
             sheets_dict[sheet_name] = hyperparam_df
 
     # Write results to Excel file
-    with pd.ExcelWriter('./training_models/output/hyperparam.xlsx') as writer:
+    with pd.ExcelWriter('./output/hyperparam.xlsx') as writer:
         for sheet_name, data in sheets_dict.items():
             data.to_excel(writer, sheet_name=sheet_name)
     # --------------------------------------------------------------------------------------------------------
-- 
2.24.1
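
A quick standalone sketch of the cost-sensitive pattern models_2 now uses: a
class-weighted base estimator inside an ensemble, tuned with the same
RandomizedSearchCV/precision setup as the script. The toy data and the
`param_dist` grid below are invented placeholders; the real search spaces live
in the script's `hyperparameters` dict.

# Minimal sketch: cost-sensitive bagging + randomized search (synthetic data).
from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold

# Imbalanced toy data standing in for the pre/post .npy arrays.
X, y = make_classification(n_samples=500, weights=[0.9, 0.1], random_state=0)

# class_weight='balanced' on the base tree makes every bagged tree
# cost-sensitive, mirroring the Bagging/AB entries added to models_2.
model = BaggingClassifier(estimator=DecisionTreeClassifier(class_weight='balanced'))

# Nested hyperparameters reach the base estimator via the 'estimator__' prefix.
param_dist = {'n_estimators': [10, 25, 50],
              'estimator__max_depth': [3, 5, None]}

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
search = RandomizedSearchCV(model, param_distributions=param_dist, n_iter=5,
                            cv=cv, n_jobs=1, scoring='precision')
search.fit(X, y)
print(search.best_params_, round(search.best_score_, 3))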