Commit 9d601f43 authored by Joaquin Torres

Merge branch 'model_sel_and_shap' of https://medal.ctb.upm.es/internal/gitlab/compara/covid_analysis into model_sel_and_shap
parents 7cc7f28b f919e066
@@ -3,6 +3,7 @@
import pandas as pd
import numpy as np
import shap
import ast
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
@@ -61,6 +62,60 @@ def read_data():
    return data_dic
# --------------------------------------------------------------------------------------------------------
# Retrieving parameters for chosen models
# --------------------------------------------------------------------------------------------------------
def get_chosen_model(group_str, method_str, model_name):
    # Read the sheet for this group-method pair, listing tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("../model_selection/output_hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}")
    tuned_models_df.columns = ['Model', 'Best Parameters']
    # Define the mapping from model abbreviations to sklearn/xgboost model classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }
    # Access the row for the given model name via the 'Model' column
    row = tuned_models_df[tuned_models_df['Model'] == model_name].iloc[0]
    # Parse the dictionary of parameters from the 'Best Parameters' column
    parameters = ast.literal_eval(row['Best Parameters'])
    # Adjust parameters for specific models or methods where needed
    if model_name == 'AB':
        parameters['algorithm'] = 'SAMME'
    elif model_name == 'LR':
        parameters['max_iter'] = 1000
    elif model_name == 'SVM':
        parameters['max_iter'] = 1000
        parameters['probability'] = True
    elif model_name == "MLP":
        parameters['max_iter'] = 500
    # Add class_weight argument for the cost-sensitive learning method
    if 'CW' in method_str:
        if model_name in ['Bagging', 'AB']:
            parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
        else:
            parameters['class_weight'] = 'balanced'
    # Fetch the class of the model
    model_class = model_mapping[model_name]
    # Initialize the model with the parameters
    chosen_model = model_class(**parameters)
    # Flag whether it is a tree-based model, which determines the SHAP explainer to use
    is_tree = model_name not in ['LR', 'SVM', 'MLP']
    return chosen_model, is_tree
# --------------------------------------------------------------------------------------------------------
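# Illustrative usage sketch (assumed sheet contents, not taken from this commit):
#   a row Model='RF', Best Parameters="{'n_estimators': 200, 'max_depth': 10}" in sheet 'pre_ORIG_CW'
#   would make get_chosen_model('pre', 'ORIG_CW', 'RF') return
#   RandomForestClassifier(n_estimators=200, max_depth=10, class_weight='balanced') and is_tree=True.
# --------------------------------------------------------------------------------------------------------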
if __name__ == "__main__":
    # Setup
@@ -73,48 +128,44 @@ if __name__ == "__main__":
        2: "OVER",
        3: "UNDER"
    }
    # Best model per method, chosen manually from the model selection results
    model_choices = {
        "ORIG": "XGB",
        "ORIG_CW": "RF",
        "OVER": "XGB",
        "UNDER": "XGB"
    }
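    # (These abbreviations must match the model_mapping keys in get_chosen_model;
    # e.g. "ORIG_CW" -> "RF" yields a RandomForestClassifier with class_weight='balanced'.)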
    # Retrieve attribute names in order
    df = pd.read_csv("../gen_train_data/data/input/pre_dataset.csv")
    attribute_names = list(df.columns.values)
    # --------------------------------------------------------------------------------------------------------
    # Shap value generation
    # --------------------------------------------------------------------------------------------------------
    for i, group in enumerate(['pre', 'post']):
        # Get test dataset based on group, add column names
        X_test = pd.DataFrame(data_dic['X_test_' + group], columns=attribute_names)
        y_test = data_dic['y_test_' + group]
        # The two '' entries reuse the original training data (for ORIG and ORIG_CW)
        for j, method in enumerate(['', '', 'over_', 'under_']):
            print(f"{group}-{method_names[j]}")
            # Get train dataset based on group and method
            X_train = pd.DataFrame(data_dic['X_train_' + method + group], columns=attribute_names)
            y_train = data_dic['y_train_' + method + group]
            method_name = method_names[j]
            # Get chosen tuned model for this group and method context
            model, is_tree = get_chosen_model(group_str=group, method_str=method_name, model_name=model_choices[method_name])
            # --------------------------------------------------------------------------------------------------------
            # Fit model with training data
            fitted_model = model.fit(X_train[:500], y_train[:500])
            # Pick the SHAP explainer depending on whether the model is tree-based
            if is_tree:
                explainer = shap.TreeExplainer(fitted_model)
            # else:
            #     explainer = shap.KernelExplainer(fitted_model.predict_proba, X_test[:500])
            # Compute shap values
            shap_vals = explainer.shap_values(X_test[:500], check_additivity=False)  # Change to True for final results
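            # Editorial note: TreeExplainer covers the tree-based choices above (RF, XGB);
            # if a non-tree model (LR, SVM, MLP) were ever selected, the commented-out
            # KernelExplainer branch would be needed, since `explainer` is otherwise undefined.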
            # --------------------------------------------------------------------------------------------------------
            # Save results
            np.save(f"./output/shap_values/{group}_{method_names[j]}", shap_vals)
            print(f'Shape of numpy array: {shap_vals.shape}')
    # --------------------------------------------------------------------------------------------------------
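# Illustrative follow-up (assumed usage, not part of this commit): the saved arrays
# can be reloaded later for inspection or plotting, e.g.:
#   shap_vals = np.load("./output/shap_values/pre_ORIG.npy")
#   shap.summary_plot(shap_vals, X_test[:500], feature_names=attribute_names)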