diff --git a/model_selection/output/hyperparam_post.xlsx b/model_selection/output/hyperparam_post.xlsx
index 66037db22b1306121004c8c8ce6a1414f41aa28b..5d04496f151ed7200fe8a27aa23fcf6af97fbbdd 100644
Binary files a/model_selection/output/hyperparam_post.xlsx and b/model_selection/output/hyperparam_post.xlsx differ
diff --git a/model_selection/output/hyperparam_pre_CS.xlsx b/model_selection/output/hyperparam_pre_CS.xlsx
index becfef04dcf1022c725b2e56a8dcb333d78973cb..853249595517a431427e2d2fc56d0285153bfad3 100644
Binary files a/model_selection/output/hyperparam_pre_CS.xlsx and b/model_selection/output/hyperparam_pre_CS.xlsx differ
diff --git a/model_selection/output/hyperparam_pre_ORIG.xlsx b/model_selection/output/hyperparam_pre_ORIG.xlsx
index acc975e69af393ad24254e68718c521cdc0f291c..61a62453027e2608a4a1e6ccc5e61a6ad287af5a 100644
Binary files a/model_selection/output/hyperparam_pre_ORIG.xlsx and b/model_selection/output/hyperparam_pre_ORIG.xlsx differ
diff --git a/model_selection/output/hyperparam_pre_OVER.xlsx b/model_selection/output/hyperparam_pre_OVER.xlsx
index 88bc7d49f604fb1992b5d5c2dfede07081c70c56..0c0bb36e93477902f43a173a3efb4532329e0391 100644
Binary files a/model_selection/output/hyperparam_pre_OVER.xlsx and b/model_selection/output/hyperparam_pre_OVER.xlsx differ
diff --git a/model_selection/output/hyperparam_pre_UNDER.xlsx b/model_selection/output/hyperparam_pre_UNDER.xlsx
index 81b3c90c316fe8adc3587295a057325f9bd76db2..0e169ed177f2742d6aa38d3a84ddb5f6bc87e153 100644
Binary files a/model_selection/output/hyperparam_pre_UNDER.xlsx and b/model_selection/output/hyperparam_pre_UNDER.xlsx differ
diff --git a/model_selection/test_models.py b/model_selection/test_models.py
index de36a19ff247c93d6b77ebc6d234fe573ef138a2..3eda93cc279d1c74b3ed507c8c3577c1a25fc118 100644
--- a/model_selection/test_models.py
+++ b/model_selection/test_models.py
@@ -43,109 +43,72 @@ def get_tuned_models(group_id, method_id):
         # 1.1) Trained with original dataset
         if method_id == 0:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
+                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}),
+                "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 117}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 23, 'warm_start': True}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.9189147333140566, 'n_estimators': 131, 'algorithm': 'SAMME'}),
+                "XGB": XGBClassifier(**{'learning_rate': 0.22870029177880222, 'max_depth': 8, 'n_estimators': 909}),
+                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
+                "SVM" : SVC(**{'C': 0.9872682949695772, 'kernel': 'linear', 'max_iter':1000}),
+                "MLP" : MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 122, 'learning_rate': 'invscaling', 'max_iter':500})
             }
         # 1.2) Trained with original dataset and cost-sensitive learning
         elif method_id == 1:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
+                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'log_loss', 'class_weight': 'balanced'}),
+                "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 118, 'class_weight': 'balanced'}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 15, 'warm_start': False}, estimator=DecisionTreeClassifier(class_weight='balanced')),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 0.8159074545140872, 'n_estimators': 121}, estimator=DecisionTreeClassifier(class_weight='balanced'), algorithm='SAMME'),
+                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None}, max_iter=1000, class_weight='balanced'),
+                "SVM" : SVC(**{'C': 1.5550524351360953, 'kernel': 'linear'}, max_iter=1000, class_weight='balanced'),
             }
         # 1.3) Trained with oversampled training dataset
         elif method_id == 2:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
+                "DT" : DecisionTreeClassifier(**{'splitter': 'random', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
+                "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 135}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 26, 'warm_start': True}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.6590924545876917, 'n_estimators': 141, 'algorithm': 'SAMME'}),
+                "XGB": XGBClassifier(**{'learning_rate': 0.26946295284728783, 'max_depth': 7, 'n_estimators': 893}),
+                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
+                "SVM" : SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter':1000}),
+                "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 116, 'learning_rate': 'invscaling', 'max_iter':500})
             }
-        # 1. 4) Trained with undersampled training dataset
+        # 1.4) Trained with undersampled training dataset
         elif method_id == 3:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
+                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}),
+                "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 104}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 38, 'warm_start': True}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.6996764264041269, 'n_estimators': 93, 'algorithm': 'SAMME'}),
+                "XGB": XGBClassifier(**{'learning_rate': 0.26480707899668926, 'max_depth': 7, 'n_estimators': 959}),
+                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
+                "SVM" : SVC(**{'C': 1.1996501173654208, 'kernel': 'poly', 'max_iter':1000}),
+                "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 131, 'learning_rate': 'constant', 'max_iter':500})
             }
     # 2. POST
     else:
         # 2.1) Trained with original dataset
         if method_id == 0:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
+                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'gini'}),
+                "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 213}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 32, 'warm_start': True}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.7806904141367559, 'n_estimators': 66, 'algorithm': 'SAMME'}),
+                "XGB": XGBClassifier(**{'learning_rate': 0.21889089898592098, 'max_depth': 6, 'n_estimators': 856}),
+                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
+                "SVM" : SVC(**{'C': 1.9890638540240584, 'kernel': 'linear', 'max_iter':1000}),
+                "MLP" : MLPClassifier(**{'activation': 'logistic', 'hidden_layer_sizes': 112, 'learning_rate': 'constant', 'max_iter':500})
             }
         # 2.2) Trained with original dataset and cost-sensitive learning
         elif method_id == 1:
-            tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
-            }
+            ...
         # 2.3) Trained with oversampled training dataset
         elif method_id == 2:
-            tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
-            }
+            ...
        # 2.4) Trained with undersampled training dataset
         elif method_id == 3:
-            tuned_models = {
-                "DT" : DecisionTreeClassifier(),
-                "RF" : RandomForestClassifier(),
-                "Bagging" : BaggingClassifier(),
-                "AB" : AdaBoostClassifier(),
-                "XGB": XGBClassifier(),
-                "LR" : LogisticRegression(max_iter=1000),
-                "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
-                "SVM" : SVC(probability=True),
-                "MLP" : MLPClassifier(max_iter=500)
-            }
+            ...
     return tuned_models
 
 # --------------------------------------------------------------------------------------------------------
@@ -201,8 +164,8 @@ if __name__ == "__main__":
         'FN':FN_scorer,
         'FP':FP_scorer,
         'TP':TP_scorer
-        }
+        } # AUROC and AUPRC (plot?)
     method_names = {
         0: "ORIG",
         1: "ORIG_CW",
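Note: the second hunk closes a scoring dictionary that references FN_scorer, FP_scorer and TP_scorer, which are defined elsewhere in test_models.py. For context, a minimal sketch of how such count-based scorers are commonly built with scikit-learn; the helper names below (fn_count and friends) are assumptions for illustration, not this repository's definitions.

    # Sketch only: confusion-matrix count scorers via sklearn's make_scorer.
    # Helper names are hypothetical, not code from this PR.
    from sklearn.metrics import confusion_matrix, make_scorer

    def fp_count(y_true, y_pred):
        # For binary labels, confusion_matrix(...).ravel() yields (tn, fp, fn, tp)
        return confusion_matrix(y_true, y_pred).ravel()[1]

    def fn_count(y_true, y_pred):
        return confusion_matrix(y_true, y_pred).ravel()[2]

    def tp_count(y_true, y_pred):
        return confusion_matrix(y_true, y_pred).ravel()[3]

    FP_scorer = make_scorer(fp_count)
    FN_scorer = make_scorer(fn_count)
    TP_scorer = make_scorer(tp_count)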
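For reviewers trying the patch locally, a minimal usage sketch of get_tuned_models with the scorers above. The diff shows that method_id 0-3 correspond to ORIG / ORIG_CW / OVER / UNDER; the group_id value and the synthetic imbalanced dataset below are illustrative assumptions.

    # Sketch only: cross-validating the tuned models on toy imbalanced data.
    # group_id=0 (assumed "pre" group) and method_id=0 (ORIG) are assumptions.
    from sklearn.datasets import make_classification
    from sklearn.model_selection import cross_validate

    X, y = make_classification(n_samples=500, weights=[0.9], random_state=0)

    for name, model in get_tuned_models(group_id=0, method_id=0).items():
        res = cross_validate(model, X, y, cv=5,
                             scoring={'TP': TP_scorer, 'FP': FP_scorer, 'FN': FN_scorer})
        print(f"{name}: TP={res['test_TP'].mean():.1f} "
              f"FP={res['test_FP'].mean():.1f} FN={res['test_FN'].mean():.1f}")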