COMPARA / covid_analysis / Commits

Commit cf69c55e, authored May 23, 2024 by Joaquin Torres (parent 72e7890d)

    Generated first ROC curve to see behavior

The change narrows the cross-validation metric loops to a single configuration (group 'pre', no resampling method, model 'DT') so that a first ROC curve can be generated and inspected.

Showing 3 changed files with 6764 additions and 27 deletions (+6764 / -27)
model_selection/cv_metric_gen.py                          +25    -27
model_selection/output_cv_metrics/curves/pre_ORIG.svg   +6739     -0
model_selection/output_cv_metrics/metrics.xlsx              +0     -0
model_selection/cv_metric_gen.py (view file @ cf69c55e; inline diff, whitespace changes hidden)

...
@@ -175,8 +175,8 @@ if __name__ == "__main__":
     # Metric generation through cv for tuned models3
     # --------------------------------------------------------------------------------------------------------
     scores_sheets = {} # To store score dfs as sheets in the same excel file
-    for i, group in enumerate(['pre', 'post']):
-        for j, method in enumerate(['', '', 'over_', 'under_']):
+    for i, group in enumerate(['pre']): # 'post'
+        for j, method in enumerate(['']): # '', 'over_', 'under_'
             # print(f"{group}-{method_names[j]}")
             # Get train dataset based on group and method
             X_train = data_dic['X_train_' + method + group]
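For reference, the inner loops select a training split by concatenating strings into a data_dic key ('X_train_' + method + group). A minimal sketch of that key scheme, with a hypothetical data_dic standing in for the one the script builds earlier (outside this diff):

    import numpy as np

    # Hypothetical stand-in for the script's data_dic, which is constructed
    # earlier in cv_metric_gen.py and not shown in this diff.
    data_dic = {
        'X_train_pre':       np.zeros((4, 2)),  # group 'pre', no resampling ('')
        'X_train_over_pre':  np.zeros((6, 2)),  # oversampled 'pre'
        'X_train_under_pre': np.zeros((3, 2)),  # undersampled 'pre'
    }

    for i, group in enumerate(['pre']):      # 'post' disabled in this commit
        for j, method in enumerate(['']):    # 'over_', 'under_' disabled too
            X_train = data_dic['X_train_' + method + group]
            print('X_train_' + method + group, X_train.shape)  # -> X_train_pre (4, 2)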
...
@@ -191,28 +191,29 @@ if __name__ == "__main__":
                 axes = [axes]
             # Metric generation for each model
             for model_idx, (model_name, model) in enumerate(models.items()):
+                if model_name == 'DT':
                 print(f"{group}-{method_names[j]}-{model_name}")
                 # Retrieve cv scores for our metrics of interest
                 scores = cross_validate(model, X_train, y_train, scoring=scorings, cv=cv, return_train_score=True, n_jobs=10)
                 # Save results of each fold
                 for metric_name in scorings.keys():
                     scores_df.loc[model_name + f'_{metric_name}'] = list(np.around(np.array(scores[f"test_{metric_name}"]), 4))
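cross_validate returns one score per fold under test_<metric> (and train_<metric> as well, since return_train_score=True). A self-contained sketch of this pattern, assuming toy data and a stand-in scorings dict (the script's tuned models and its own scorings are defined elsewhere):

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import StratifiedKFold, cross_validate
    from sklearn.tree import DecisionTreeClassifier

    # Toy stand-ins for the script's tuned models and scorings dict
    X_train, y_train = make_classification(n_samples=200, random_state=0)
    model = DecisionTreeClassifier(random_state=0)
    scorings = {'roc_auc': 'roc_auc', 'f1': 'f1'}  # metric name -> scorer
    cv = StratifiedKFold(n_splits=10)

    scores = cross_validate(model, X_train, y_train, scoring=scorings,
                            cv=cv, return_train_score=True, n_jobs=10)
    for metric_name in scorings.keys():
        # One test score per fold, rounded to 4 decimals as in the script
        print(metric_name, list(np.around(np.array(scores[f"test_{metric_name}"]), 4)))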
                 # Generate ROC curves
                 mean_fpr = np.linspace(0, 1, 100)
                 tprs, aucs = [], []
                 # Loop through each fold in the cross-validation
                 for fold_idx, (train, test) in enumerate(cv.split(X_train, y_train)):
                     # Fit the model on the training data
                     model.fit(X_train[train], y_train[train])
                     # Use RocCurveDisplay to generate the ROC curve
                     roc_display = RocCurveDisplay.from_estimator(model, X_train[test], y_train[test],
                                                                  name=f"ROC fold {fold_idx}", alpha=0.3, lw=1, ax=axes[model_idx])
                     # Interpolate the true positive rates to get a smooth curve
                     interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
                     interp_tpr[0] = 0.0
                     # Append the interpolated TPR and AUC for this fold
                     tprs.append(interp_tpr)
                     aucs.append(roc_display.roc_auc)
                 # Plot the diagonal line representing random guessing
                 axes[model_idx].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8)
                 # Compute the mean and standard deviation of the TPRs
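Each fold refits the model, draws that fold's ROC on the model's axis via RocCurveDisplay.from_estimator, and interpolates the TPR onto a shared 100-point FPR grid so the fold curves can be averaged afterwards. A runnable sketch of the loop on toy data (a single axis stands in for the script's axes[model_idx]):

    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.datasets import make_classification
    from sklearn.metrics import RocCurveDisplay
    from sklearn.model_selection import StratifiedKFold
    from sklearn.tree import DecisionTreeClassifier

    # Toy stand-ins; the script uses its tuned models and CV splitter
    X, y = make_classification(n_samples=200, random_state=0)
    model = DecisionTreeClassifier(max_depth=3, random_state=0)
    cv = StratifiedKFold(n_splits=5)
    fig, ax = plt.subplots()

    mean_fpr = np.linspace(0, 1, 100)   # common FPR grid shared by all folds
    tprs, aucs = [], []
    for fold_idx, (train, test) in enumerate(cv.split(X, y)):
        model.fit(X[train], y[train])
        # Plots this fold's ROC on ax and exposes fpr / tpr / roc_auc
        roc_display = RocCurveDisplay.from_estimator(
            model, X[test], y[test], name=f"ROC fold {fold_idx}",
            alpha=0.3, lw=1, ax=ax)
        # Resample this fold's TPR onto the common grid so folds can be averaged
        interp_tpr = np.interp(mean_fpr, roc_display.fpr, roc_display.tpr)
        interp_tpr[0] = 0.0             # every curve starts at (0, 0)
        tprs.append(interp_tpr)
        aucs.append(roc_display.roc_auc)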
...
@@ -220,19 +221,16 @@ if __name__ == "__main__":
                 mean_tpr[-1] = 1.0
                 mean_auc = auc(mean_fpr, mean_tpr) # Calculate the mean AUC
                 std_auc = np.std(aucs)
                 # Plot the mean ROC curve
                 axes[model_idx].plot(mean_fpr, mean_tpr, color='b',
                                      label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
                                      lw=2, alpha=.8)
                 # Plot the standard deviation of the TPRs
                 std_tpr = np.std(tprs, axis=0)
                 tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                 tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                 axes[model_idx].fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                                              label=r'$\pm$ 1 std. dev.')
                 # Set plot limits and title
                 axes[model_idx].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
                                     title=f"ROC Curve - {model_name} ({group}-{method_names[j]})")
...
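The visible tail of the hunk averages the per-fold curves and shades one standard deviation around the mean. Continuing the sketch above (same tprs, aucs, mean_fpr, ax; mean_tpr is assumed to be np.mean(tprs, axis=0), computed in the collapsed lines of the diff):

    from sklearn.metrics import auc

    mean_tpr = np.mean(tprs, axis=0)    # assumed: computed in the collapsed lines
    mean_tpr[-1] = 1.0                  # force the mean curve to end at (1, 1)
    mean_auc = auc(mean_fpr, mean_tpr)  # AUC of the averaged curve
    std_auc = np.std(aucs)
    # Plot the mean ROC curve
    ax.plot(mean_fpr, mean_tpr, color='b',
            label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
            lw=2, alpha=.8)
    # Shade +/- 1 std. dev. of the TPR across folds, clipped to [0, 1]
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                    label=r'$\pm$ 1 std. dev.')
    ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8)  # chance line
    ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title="ROC Curve - DT (pre)")
    ax.legend()
    plt.savefig("pre_ORIG_sketch.svg")  # the commit itself adds curves/pre_ORIG.svg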
model_selection/output_cv_metrics/curves/pre_ORIG.svg (new file, 0 → 100644; view file @ cf69c55e)
Diff collapsed (+6739 lines).
model_selection/output_cv_metrics/metrics.xlsx (new file, 0 → 100644; view file @ cf69c55e)
File added.