Commit 2412d533 authored by Joaquin Torres's avatar Joaquin Torres

Testing summary plots with interaction values and timing execution

parent e9717115
...@@ -152,7 +152,7 @@ if __name__ == "__main__":
             method_name = method_names[j]
             # Get chosen tuned model for this group and method context
             model, is_tree = get_chosen_model(group_str=group, method_str=method_name, model_name=model_choices[method_name])
-            # --------------------------------------------------------------------------------------------------------
+            # --------------------------------------------------------------------------------------------------------j
             fitted_model = model.fit(X_train[:50], y_train[:50])
             # # Check if we are dealing with a tree vs nn model
             if is_tree:
...
# Libraries
# --------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import shap
import ast
import matplotlib.pyplot as plt
import time
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# --------------------------------------------------------------------------------------------------------
# Reading test and training data
# --------------------------------------------------------------------------------------------------------
def read_data(attribute_names):
    # Load test data
    X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
    y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
    X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
    y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)
    # Load ORIGINAL training data
    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)
    # Load oversampled training data
    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)
    # Load undersampled training data
    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)
    # Wrap the feature arrays in DataFrames with named columns and infer proper dtypes
    data_dic = {
        "X_test_pre": pd.DataFrame(X_test_pre, columns=attribute_names).convert_dtypes(),
        "y_test_pre": y_test_pre,
        "X_test_post": pd.DataFrame(X_test_post, columns=attribute_names).convert_dtypes(),
        "y_test_post": y_test_post,
        "X_train_pre": pd.DataFrame(X_train_pre, columns=attribute_names).convert_dtypes(),
        "y_train_pre": y_train_pre,
        "X_train_post": pd.DataFrame(X_train_post, columns=attribute_names).convert_dtypes(),
        "y_train_post": y_train_post,
        "X_train_over_pre": pd.DataFrame(X_train_over_pre, columns=attribute_names).convert_dtypes(),
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": pd.DataFrame(X_train_over_post, columns=attribute_names).convert_dtypes(),
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": pd.DataFrame(X_train_under_pre, columns=attribute_names).convert_dtypes(),
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": pd.DataFrame(X_train_under_post, columns=attribute_names).convert_dtypes(),
        "y_train_under_post": y_train_under_post,
    }
    return data_dic
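# Keys compose as '<X|y>_<split>[_<method>]_<group>', e.g. data_dic["X_train_under_pre"]
# holds the undersampled 'pre' training features; the __main__ block below relies on this.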
# --------------------------------------------------------------------------------------------------------
# Retrieving parameters for chosen models
# --------------------------------------------------------------------------------------------------------
def get_chosen_model(group_str, method_str, model_name):
    # Read sheet corresponding to group and method with tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("../model_selection/output_hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}")
    tuned_models_df.columns = ['Model', 'Best Parameters']
    # Define the mapping from model abbreviations to classifier classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }
    # Access the row for the given model name via the 'Model' column
    row = tuned_models_df[tuned_models_df['Model'] == model_name].iloc[0]
    # Parse the dictionary of parameters from the 'Best Parameters' column
    parameters = ast.literal_eval(row['Best Parameters'])
    # Modify parameters based on model specifics or methods if necessary
    if model_name == 'AB':
        parameters['algorithm'] = 'SAMME'
    elif model_name == 'LR':
        parameters['max_iter'] = 1000
    elif model_name == 'SVM':
        parameters['max_iter'] = 1000
        parameters['probability'] = True
    elif model_name == "MLP":
        parameters['max_iter'] = 500
    # Add class_weight argument for the cost-sensitive learning method
    if 'CW' in method_str:
        if model_name in ['Bagging', 'AB']:
            parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
        else:
            parameters['class_weight'] = 'balanced'
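        # NOTE: for the ensemble wrappers the balanced weighting is pushed into the base
        # DecisionTreeClassifier, since sklearn's BaggingClassifier and AdaBoostClassifier
        # do not accept a class_weight argument themselves.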
    # Fetch the class of the model
    model_class = model_mapping[model_name]
    # Initialize the model with the parameters
    chosen_model = model_class(**parameters)
    # Also report whether it is a tree-based model, so the caller can pick the right SHAP explainer
    is_tree = model_name not in ['LR', 'SVM', 'MLP']
    return chosen_model, is_tree
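# Example call (illustrative, mirroring the usage in __main__ below):
# model, is_tree = get_chosen_model(group_str='pre', method_str='UNDER', model_name='XGB')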
# --------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Retrieve attribute names in order
    attribute_names = list(np.load('../gen_train_data/data/output/attributes.npy', allow_pickle=True))
    # Reading data
    data_dic = read_data(attribute_names)
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    model_choices = {
        "ORIG": "XGB",
        "ORIG_CW": "RF",
        "OVER": "XGB",
        "UNDER": "XGB"
    }
    # --------------------------------------------------------------------------------------------------------
    # SHAP value generation for UNDER, to test whether SHAP interaction values work
    # --------------------------------------------------------------------------------------------------------
    group = 'pre'
    method = 'under_'
    X_test = data_dic['X_test_' + group]
    y_test = data_dic['y_test_' + group]
    X_train = data_dic['X_train_' + method + group]
    y_train = data_dic['y_train_' + method + group]
    method_name = 'UNDER'
    # Get chosen tuned model for this group and method context
    model, is_tree = get_chosen_model(group_str=group, method_str=method_name, model_name=model_choices[method_name])
    fit_start_t = time.time()
    # Fit model on a training subset (first 500 rows only, to keep this test fast)
    fitted_model = model.fit(X_train[:500], y_train[:500])
    fit_end_t = time.time()
    print(f'Fitted OK. Took {fit_end_t - fit_start_t} seconds.')
    # Check whether we are dealing with a tree-based model (vs an nn/linear one)
    expl_start_t = time.time()
    if is_tree:
        explainer = shap.TreeExplainer(fitted_model)
        expl_end_t = time.time()
        print(f'Explainer OK. Took {expl_end_t - expl_start_t} seconds.')
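        # NOTE: TreeExplainer computes exact SHAP values for tree ensembles such as
        # XGBoost, and it is the SHAP explainer that exposes shap_interaction_values,
        # hence the is_tree gate above; non-tree models would need a different explainer.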
        # Compute shap values
        shap_val_start_t = time.time()
        shap_vals = explainer.shap_values(X_test[:500], check_additivity=False)  # Change to True for final results
        shap_val_end_t = time.time()
        print(f'Shap values computed. Took {shap_val_end_t - shap_val_start_t} seconds.')
        # Compute shap interaction values (timed separately from the shap values above)
        shap_inter_start_t = time.time()
        shap_interaction_values = explainer.shap_interaction_values(X_test[:500])
        shap_inter_end_t = time.time()
        print(f'Shape: {shap_interaction_values.shape}')
        print(f'Interaction values computed. Took {shap_inter_end_t - shap_inter_start_t} seconds.')
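        # For a binary XGBClassifier, shap_interaction_values is expected to return an
        # array of shape (n_samples, n_features, n_features); summing the interaction
        # matrix over one feature axis recovers each feature's shap value.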
        # Plot interaction values across variables
        plot_start_t = time.time()
        # show=False keeps the figure open so plt.savefig below does not write an empty canvas
        shap.summary_plot(shap_interaction_values, X_test[:500], max_display=5, show=False)
        plot_end_t = time.time()
        print(f'Plot done. Took {plot_end_t - plot_start_t} seconds.')
        plt.savefig('shap_summary_plot.svg', dpi=1000)
        plt.close()
\ No newline at end of file