COMPARA / covid_analysis · Commits

Commit d57b78fe, authored May 10, 2024 by Joaquin Torres
Parent: 4ee1a0a1

    update hyperparamers for PRE

Showing 1 changed file with 28 additions and 28 deletions:

model_selection/test_models.py (+28, -28)
@@ -43,48 +43,48 @@ def get_tuned_models(group_id, method_id):
         # 1.1) Trained with original dataset
         if method_id == 0:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}),
+                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'entropy'}),
-                "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 117}),
+                "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 123}),
-                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 23, 'warm_start': True}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 13, 'warm_start': False}),
-                "AB" : AdaBoostClassifier(**{'learning_rate': 1.9189147333140566, 'n_estimators': 131, 'algorithm': 'SAMME'}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.8473150336970519, 'n_estimators': 96, 'algorithm': 'SAMME'}),
-                "XGB" : XGBClassifier(**{'learning_rate': 0.22870029177880222, 'max_depth': 8, 'n_estimators': 909}),
+                "XGB" : XGBClassifier(**{'learning_rate': 0.21528982071549305, 'max_depth': 6, 'n_estimators': 804}),
-                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
+                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
-                #"SVM" : SVC(**{'C': 0.9872682949695772, 'kernel': 'linear', 'max_iter':1000, 'probability': True}),
+                "SVM" : SVC(**{'C': 1.051871311397777, 'kernel': 'linear', 'max_iter': 1000, 'probability': True}),
-                "MLP" : MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 122, 'learning_rate': 'invscaling', 'max_iter': 500})
+                "MLP" : MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 78, 'learning_rate': 'constant', 'max_iter': 500})
             }
         # 1.2) Trained with original dataset and cost-sensitive learning
         elif method_id == 1:
             tuned_models = {
                 "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'entropy', 'class_weight': 'balanced'}),
-                "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 118, 'class_weight': 'balanced'}),
+                "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 238, 'class_weight': 'balanced'}),
-                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 15, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 22, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
-                "AB" : AdaBoostClassifier(**{'learning_rate': 0.8159074545140872, 'n_estimators': 121, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.7136783954287846, 'n_estimators': 99, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
-                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000, 'class_weight': 'balanced'}),
+                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000, 'class_weight': 'balanced'}),
-                #"SVM": SVC(**{'C': 1.5550524351360953, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced', 'probability': True}),
+                "SVM" : SVC(**{'C': 1.480857958217729, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced', 'probability': True}),
             }
         # 1.3) Trained with oversampled training dataset
         elif method_id == 2:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(**{'splitter': 'random', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
+                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
-                "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 135}),
+                "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 121}),
-                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 26, 'warm_start': True}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 22, 'warm_start': True}),
-                "AB" : AdaBoostClassifier(**{'learning_rate': 1.6590924545876917, 'n_estimators': 141, 'algorithm': 'SAMME'}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.4640913091426446, 'n_estimators': 145, 'algorithm': 'SAMME'}),
-                "XGB" : XGBClassifier(**{'learning_rate': 0.26946295284728783, 'max_depth': 7, 'n_estimators': 893}),
+                "XGB" : XGBClassifier(**{'learning_rate': 0.19621698151985992, 'max_depth': 7, 'n_estimators': 840}),
                 "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
-                #"SVM" : SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter':1000, 'probability': True}),
+                "SVM" : SVC(**{'C': 1.590799972846728, 'kernel': 'poly', 'max_iter': 1000, 'probability': True}),
-                "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 116, 'learning_rate': 'invscaling', 'max_iter': 500})
+                "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 112, 'learning_rate': 'constant', 'max_iter': 500})
             }
         # 1.4) Trained with undersampled training dataset
         elif method_id == 3:
             tuned_models = {
-                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}),
+                "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
-                "RF" : RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 104}),
+                "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 148}),
-                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 38, 'warm_start': True}),
+                "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 24, 'warm_start': True}),
-                "AB" : AdaBoostClassifier(**{'learning_rate': 1.6996764264041269, 'n_estimators': 93, 'algorithm': 'SAMME'}),
+                "AB" : AdaBoostClassifier(**{'learning_rate': 1.7970533619575801, 'n_estimators': 122, 'algorithm': 'SAMME'}),
-                "XGB" : XGBClassifier(**{'learning_rate': 0.26480707899668926, 'max_depth': 7, 'n_estimators': 959}),
+                "XGB" : XGBClassifier(**{'learning_rate': 0.13148624656904934, 'max_depth': 9, 'n_estimators': 723}),
-                "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
+                "LR" : LogisticRegression(**{'solver': 'sag', 'penalty': 'l2', 'max_iter': 1000}),
-                #"SVM" : SVC(**{'C': 1.1996501173654208, 'kernel': 'poly', 'max_iter':1000, 'probability': True}),
+                "SVM" : SVC(**{'C': 1.383651513577477, 'kernel': 'poly', 'max_iter': 1000, 'probability': True}),
-                "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 131, 'learning_rate': 'constant', 'max_iter': 500})
+                "MLP" : MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 89, 'learning_rate': 'invscaling', 'max_iter': 500})
             }
     # 2. POST
     else:
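Below is a minimal, hypothetical sketch of how the tuned model dictionaries returned by get_tuned_models might be exercised after this change. The import path, the meaning of group_id=0 (assumed here to select the PRE cohort), and the synthetic X/y data are illustration-only assumptions; only the method_id values and the model keys come from the diff above, and the resampling implied by methods 2 and 3 is omitted.

# Hypothetical usage sketch (not part of this commit).
from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

from model_selection.test_models import get_tuned_models  # assumed import path

# Synthetic, imbalanced stand-in data so the sketch runs end to end.
X, y = make_classification(n_samples=600, n_features=20, weights=[0.8, 0.2], random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0)

# method_id mapping taken from the comments in the diff; oversampling/undersampling
# for methods 2 and 3 would be applied to X_train/y_train elsewhere in the pipeline.
methods = {0: "original", 1: "cost-sensitive", 2: "oversampled", 3: "undersampled"}

for method_id, label in methods.items():
    tuned_models = get_tuned_models(group_id=0, method_id=method_id)  # group_id=0: assumed PRE
    for name, model in tuned_models.items():
        model.fit(X_train, y_train)
        auc = roc_auc_score(y_test, model.predict_proba(X_test)[:, 1])
        print(f"{label:>14s}  {name:<8s} AUC = {auc:.3f}")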