diff --git a/model_selection/hyperparam_tuning.py b/model_selection/hyperparam_tuning.py
index d90158ddd48c97137f30ccc0a297ec7878b42753..4864b2cfc05d929b8439c1eb9bf5a768f4a41e14 100644
--- a/model_selection/hyperparam_tuning.py
+++ b/model_selection/hyperparam_tuning.py
@@ -79,7 +79,7 @@ if __name__ == "__main__":
         "AB" : AdaBoostClassifier(algorithm='SAMME'),
         "XGB": XGBClassifier(),
         "LR" : LogisticRegression(max_iter=1000),
-        "SVM" : SVC(max_iter=1000),
+        "SVM" : SVC(probability=True, max_iter=1000),
         "MLP" : MLPClassifier(max_iter=500)
         # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet')
     }
@@ -90,7 +90,7 @@ if __name__ == "__main__":
         "Bagging" : BaggingClassifier(estimator=DecisionTreeClassifier(class_weight='balanced')),
         "AB" : AdaBoostClassifier(estimator=DecisionTreeClassifier(class_weight='balanced'), algorithm='SAMME'),
         "LR" : LogisticRegression(max_iter=1000, class_weight='balanced'),
-        "SVM" : SVC(max_iter = 1000, class_weight='balanced'),
+        "SVM" : SVC(probability=True, max_iter=1000, class_weight='balanced'),
         # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet', class_weight='balanced'),
         # "XGB": XGBClassifier(), # <-
         # "MLP" : MLPClassifier(max_iter=500) # <-
@@ -142,18 +142,17 @@ if __name__ == "__main__":
     # --------------------------------------------------------------------------------------------------------
     # Store each df as a sheet in an excel file
     sheets_dict = {}
-    for i, group in enumerate(['post']):
-        for j, method in enumerate(['']): #['', '', 'over_', 'under_']
+    for i, group in enumerate(['pre', 'post']):
+        for j, method in enumerate(['', '', 'over_', 'under_']):
             # Get dataset based on group and method
             X = data_dic['X_train_' + method + group]
             y = data_dic['y_train_' + method + group]
             # Use group of models with class weight if needed
-            # models = models_CS if j == 1 else models_simple
-            models = models_CS
+            models = models_CS if j == 1 else models_simple
             # Save results: params and best score for each of the models of this method and group
             hyperparam_df = pd.DataFrame(index=list(models.keys()), columns=['Parameters','Score'])
             for model_name, model in models.items():
-                print(f"{group}-{method_names[1]}-{model_name}")
+                print(f"{group}-{method_names[j]}-{model_name}")
                 # Find optimal hyperparams for curr model
                 params = hyperparameters[model_name]
                 search = RandomizedSearchCV(model, param_distributions=params, cv=cv, n_jobs=8, scoring='precision')
@@ -162,11 +161,11 @@ if __name__ == "__main__":
                 hyperparam_df.at[model_name,'Score']=round(search.best_score_,4)
 
             # Store the DataFrame in the dictionary with a unique key for each sheet
-            sheet_name = f"{group}_{method_names[1]}"
+            sheet_name = f"{group}_{method_names[j]}"
             sheets_dict[sheet_name] = hyperparam_df
 
     # Write results to Excel file
-    with pd.ExcelWriter('./output/hyperparam_post_ORIG_CS.xlsx') as writer:
+    with pd.ExcelWriter('./output/hyperparameters_pre_and_post.xlsx') as writer:
         for sheet_name, data in sheets_dict.items():
             data.to_excel(writer, sheet_name=sheet_name)
 
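The recurring edit in this patch adds probability=True to every SVC constructor, presumably so the SVM entries expose predict_proba like the other models in these dictionaries. A minimal sketch of the difference, using a synthetic dataset and ROC AUC purely as illustrative stand-ins for the project's real splits and metrics:

from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

# Synthetic stand-in for the project's training data (illustrative only)
X, y = make_classification(n_samples=500, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# probability=True makes libsvm fit Platt scaling internally, which is what
# enables predict_proba; without the flag, the call below raises AttributeError
svm = SVC(probability=True, max_iter=1000).fit(X_train, y_train)
scores = svm.predict_proba(X_test)[:, 1]
print(f"ROC AUC: {roc_auc_score(y_test, scores):.4f}")

The flag is not free: the Platt calibration runs an internal five-fold cross-validation, so SVC training slows down noticeably, and the calibrated probabilities can occasionally disagree with the labels returned by predict.
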
diff --git a/model_selection/test_models.py b/model_selection/test_models.py
index 58c433c68bdd2fd19000452a09e9c7890fc91b5b..2624beb6d37791f47b9d48e1f97a02fb5c92d978 100644
--- a/model_selection/test_models.py
+++ b/model_selection/test_models.py
@@ -49,7 +49,7 @@ def get_tuned_models(group_id, method_id):
            "AB" : AdaBoostClassifier(**{'learning_rate': 1.9189147333140566, 'n_estimators': 131, 'algorithm': 'SAMME'}),
            "XGB": XGBClassifier(**{'learning_rate': 0.22870029177880222, 'max_depth': 8, 'n_estimators': 909}),
            "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
-           #"SVM" : SVC(**{'C': 0.9872682949695772, 'kernel': 'linear', 'max_iter':1000}),
+           #"SVM" : SVC(**{'C': 0.9872682949695772, 'kernel': 'linear', 'max_iter':1000, 'probability': True}),
            "MLP" : MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 122, 'learning_rate': 'invscaling', 'max_iter':500})
        }
        # 1.2) Trained with original dataset and cost-sensitive learning
@@ -60,7 +60,7 @@ def get_tuned_models(group_id, method_id):
            "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 15, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
            "AB": AdaBoostClassifier(**{'learning_rate': 0.8159074545140872, 'n_estimators': 121, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
            "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000, 'class_weight': 'balanced'}),
-           #"SVM": SVC(**{'C': 1.5550524351360953, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced'}),
+           #"SVM": SVC(**{'C': 1.5550524351360953, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced', 'probability': True}),
        }
        # 1.3) Trained with oversampled training dataset
        elif method_id == 2:
@@ -71,7 +71,7 @@ def get_tuned_models(group_id, method_id):
            "AB" : AdaBoostClassifier(**{'learning_rate': 1.6590924545876917, 'n_estimators': 141, 'algorithm': 'SAMME'}),
            "XGB": XGBClassifier(**{'learning_rate': 0.26946295284728783, 'max_depth': 7, 'n_estimators': 893}),
            "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
-           #"SVM" : SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter':1000}),
+           #"SVM" : SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter':1000, 'probability': True}),
            "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 116, 'learning_rate': 'invscaling', 'max_iter':500})
        }
        # 1.4) Trained with undersampled training dataset
@@ -83,7 +83,7 @@ def get_tuned_models(group_id, method_id):
            "AB" : AdaBoostClassifier(**{'learning_rate': 1.6996764264041269, 'n_estimators': 93, 'algorithm': 'SAMME'}),
            "XGB": XGBClassifier(**{'learning_rate': 0.26480707899668926, 'max_depth': 7, 'n_estimators': 959}),
            "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
-           #"SVM" : SVC(**{'C': 1.1996501173654208, 'kernel': 'poly', 'max_iter':1000}),
+           #"SVM" : SVC(**{'C': 1.1996501173654208, 'kernel': 'poly', 'max_iter':1000, 'probability': True}),
            "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 131, 'learning_rate': 'constant', 'max_iter':500})
        }
    # 2. POST
@@ -97,7 +97,7 @@ def get_tuned_models(group_id, method_id):
            "AB" : AdaBoostClassifier(**{'learning_rate': 1.7806904141367559, 'n_estimators': 66, 'algorithm': 'SAMME'}),
            "XGB": XGBClassifier(**{'learning_rate': 0.21889089898592098, 'max_depth': 6, 'n_estimators': 856}),
            "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
-           #"SVM" : SVC(**{'C': 1.9890638540240584, 'kernel': 'linear', 'max_iter':1000}),
+           #"SVM" : SVC(**{'C': 1.9890638540240584, 'kernel': 'linear', 'max_iter':1000, 'probability': True}),
            "MLP" : MLPClassifier(**{'activation': 'logistic', 'hidden_layer_sizes': 112, 'learning_rate': 'constant', 'max_iter':500})
        }
        # 2.2) Trained with original dataset and cost-sensitive learning
@@ -108,7 +108,7 @@ def get_tuned_models(group_id, method_id):
            "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 11, 'warm_start': True, 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
            "AB": AdaBoostClassifier(**{'learning_rate': 1.7102248217141944, 'n_estimators': 108, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
            "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000, 'class_weight': 'balanced'}),
-           #"SVM": SVC(**{'C': 1.1313840454519628, 'kernel': 'sigmoid', 'max_iter': 1000, 'class_weight': 'balanced'})
+           #"SVM": SVC(**{'C': 1.1313840454519628, 'kernel': 'sigmoid', 'max_iter': 1000, 'class_weight': 'balanced', 'probability': True})
        }
        # 2.3) Trained with oversampled training dataset
        elif method_id == 2:
@@ -119,7 +119,7 @@ def get_tuned_models(group_id, method_id):
            # "AB" : AdaBoostClassifier(**{'learning_rate': 1.6590924545876917, 'n_estimators': 141, 'algorithm': 'SAMME'}),
            # "XGB": XGBClassifier(**{'learning_rate': 0.26946295284728783, 'max_depth': 7, 'n_estimators': 893}),
            # "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
-           # "SVM" : SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter':1000}),
+           # "SVM" : SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter':1000, 'probability': True}),
            # "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 116, 'learning_rate': 'invscaling', 'max_iter':500})
        }
        # 2.4) Trained with undersampled training dataset
@@ -131,7 +131,7 @@ def get_tuned_models(group_id, method_id):
            "AB" : AdaBoostClassifier(**{'learning_rate': 1.836659462701278, 'n_estimators': 138, 'algorithm': 'SAMME'}),
            "XGB": XGBClassifier(**{'learning_rate': 0.2517946893282251, 'max_depth': 4, 'n_estimators': 646}),
            "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
-           #"SVM" : SVC(**{'C': 1.8414678085000697, 'kernel': 'linear', 'max_iter':1000}),
+           #"SVM" : SVC(**{'C': 1.8414678085000697, 'kernel': 'linear', 'max_iter':1000, 'probability': True}),
            "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 76, 'learning_rate': 'constant', 'max_iter':500})
        }
    return tuned_models
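The same flag is threaded through the commented-out SVM entries in test_models.py, so whichever of them is re-enabled later will match the rest of the dictionary. A hedged usage sketch of that uniformity; the import path mirrors the file location, and the (group_id, method_id) pair and the synthetic split are illustrative assumptions, not values taken from the repository:

from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

from model_selection.test_models import get_tuned_models  # assumed import path

# Synthetic stand-in for the real train/test splits (illustrative only)
X, y = make_classification(n_samples=500, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Assumes group_id=1 / method_id=0 selects the PRE models trained on the
# original dataset; with probability=True on SVC, every entry in the returned
# dict supports the same predict_proba scoring path
for name, model in get_tuned_models(group_id=1, method_id=0).items():
    model.fit(X_train, y_train)
    y_scores = model.predict_proba(X_test)[:, 1]
    print(f"{name}: ROC AUC = {roc_auc_score(y_test, y_scores):.4f}")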