Commit d57b78fe authored by Joaquin Torres's avatar Joaquin Torres

update hyperparameters for PRE

parent 4ee1a0a1
...@@ -43,48 +43,48 @@ def get_tuned_models(group_id, method_id): ...@@ -43,48 +43,48 @@ def get_tuned_models(group_id, method_id):
# 1.1) Trained with original dataset # 1.1) Trained with original dataset
if method_id == 0: if method_id == 0:
tuned_models = { tuned_models = {
"DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}), "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'entropy'}),
"RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 117}), "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 123}),
"Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 23, 'warm_start': True}), "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 13, 'warm_start': False}),
"AB" : AdaBoostClassifier(**{'learning_rate': 1.9189147333140566, 'n_estimators': 131, 'algorithm': 'SAMME'}), "AB" : AdaBoostClassifier(**{'learning_rate': 1.8473150336970519, 'n_estimators': 96, 'algorithm': 'SAMME'}),
"XGB": XGBClassifier(**{'learning_rate': 0.22870029177880222, 'max_depth': 8, 'n_estimators': 909}), "XGB": XGBClassifier(**{'learning_rate': 0.21528982071549305, 'max_depth': 6, 'n_estimators': 804}),
"LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}), "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2','max_iter': 1000}),
#"SVM" : SVC(**{'C': 0.9872682949695772, 'kernel': 'linear', 'max_iter':1000, 'probability': True}), "SVM" : SVC(**{'C': 1.051871311397777, 'kernel': 'linear', 'max_iter':1000, 'probability': True}),
"MLP" : MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 122, 'learning_rate': 'invscaling', 'max_iter':500}) "MLP" : MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 78, 'learning_rate': 'constant','max_iter':500})
} }
# 1.2) Trained with original dataset and cost-sensitive learning # 1.2) Trained with original dataset and cost-sensitive learning
elif method_id == 1: elif method_id == 1:
tuned_models = { tuned_models = {
"DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'entropy', 'class_weight': 'balanced'}), "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'entropy', 'class_weight': 'balanced'}),
"RF": RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 118, 'class_weight': 'balanced'}), "RF": RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 238, 'class_weight': 'balanced'}),
"Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 15, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}), "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 22, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
"AB": AdaBoostClassifier(**{'learning_rate': 0.8159074545140872, 'n_estimators': 121, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}), "AB": AdaBoostClassifier(**{'learning_rate': 1.7136783954287846, 'n_estimators': 99, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
"LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000, 'class_weight': 'balanced'}), "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000, 'class_weight': 'balanced'}),
#"SVM": SVC(**{'C': 1.5550524351360953, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced', 'probability': True}), "SVM": SVC(**{'C': 1.480857958217729, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced', 'probability': True}),
} }
# 1.3) Trained with oversampled training dataset # 1.3) Trained with oversampled training dataset
elif method_id == 2: elif method_id == 2:
tuned_models = { tuned_models = {
"DT" : DecisionTreeClassifier(**{'splitter': 'random', 'max_features': 'sqrt', 'criterion': 'log_loss'}), "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
"RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 135}), "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 121}),
"Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 26, 'warm_start': True}), "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 22, 'warm_start': True}),
"AB" : AdaBoostClassifier(**{'learning_rate': 1.6590924545876917, 'n_estimators': 141, 'algorithm': 'SAMME'}), "AB" : AdaBoostClassifier(**{'learning_rate': 1.4640913091426446, 'n_estimators': 145, 'algorithm': 'SAMME'}),
"XGB": XGBClassifier(**{'learning_rate': 0.26946295284728783, 'max_depth': 7, 'n_estimators': 893}), "XGB": XGBClassifier(**{'learning_rate': 0.19621698151985992, 'max_depth': 7, 'n_estimators': 840}),
"LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}), "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
#"SVM" : SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter':1000, 'probability': True}), "SVM" : SVC(**{'C': 1.590799972846728, 'kernel': 'poly', 'max_iter':1000, 'probability': True}),
"MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 116, 'learning_rate': 'invscaling', 'max_iter':500}) "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 112, 'learning_rate': 'constant', 'max_iter':500})
} }
# 1.4) Trained with undersampled training dataset # 1.4) Trained with undersampled training dataset
elif method_id == 3: elif method_id == 3:
tuned_models = { tuned_models = {
"DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}), "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
"RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 104}), "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 148}),
"Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 38, 'warm_start': True}), "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 24, 'warm_start': True}),
"AB" : AdaBoostClassifier(**{'learning_rate': 1.6996764264041269, 'n_estimators': 93, 'algorithm': 'SAMME'}), "AB" : AdaBoostClassifier(**{'learning_rate': 1.7970533619575801, 'n_estimators': 122, 'algorithm': 'SAMME'}),
"XGB": XGBClassifier(**{'learning_rate': 0.26480707899668926, 'max_depth': 7, 'n_estimators': 959}), "XGB": XGBClassifier(**{'learning_rate': 0.13148624656904934, 'max_depth': 9, 'n_estimators': 723}),
"LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}), "LR" : LogisticRegression(**{'solver': 'sag', 'penalty': 'l2', 'max_iter': 1000}),
#"SVM" : SVC(**{'C': 1.1996501173654208, 'kernel': 'poly', 'max_iter':1000, 'probability': True}), "SVM" : SVC(**{'C': 1.383651513577477, 'kernel': 'poly', 'max_iter':1000, 'probability': True}),
"MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 131, 'learning_rate': 'constant', 'max_iter':500}) "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 89, 'learning_rate': 'invscaling', 'max_iter':500})
} }
# 2. POST # 2. POST
else: else:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment