diff --git a/explicability/compute_shap_vals.py b/explicability/compute_shap_vals.py
new file mode 100644
index 0000000000000000000000000000000000000000..f98debc7f82faf432867e9f674e310be0cd18d29
--- /dev/null
+++ b/explicability/compute_shap_vals.py
@@ -0,0 +1,85 @@
+# Libraries
+# --------------------------------------------------------------------------------------------------------
+import pandas as pd
+import numpy as np
+import shap
+import pickle
+from xgboost import XGBClassifier
+from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
+from sklearn.neural_network import MLPClassifier
+from sklearn.svm import SVC
+from sklearn.linear_model import LogisticRegression
+from sklearn.tree import DecisionTreeClassifier
+# --------------------------------------------------------------------------------------------------------
+
+# Reading test data
+# --------------------------------------------------------------------------------------------------------
+def read_test_data(attribute_names):
+    # Load test data
+    X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
+    y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
+    X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
+    y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)
+
+    # Type conversion needed
+    data_dic = {
+        "X_test_pre": pd.DataFrame(X_test_pre, columns=attribute_names).convert_dtypes(),
+        "y_test_pre": y_test_pre,
+        "X_test_post": pd.DataFrame(X_test_post, columns=attribute_names).convert_dtypes(),
+        "y_test_post": y_test_post,
+    }
+    return data_dic
+# --------------------------------------------------------------------------------------------------------
+
+if __name__ == "__main__":
+
+    # Setup
+    # --------------------------------------------------------------------------------------------------------
+    # Retrieve attribute names in order
+    attribute_names = list(np.load('../gen_train_data/data/output/attributes.npy', allow_pickle=True))
+    # Reading data
+    data_dic = read_test_data(attribute_names)
+    method_names = {
+        0: "ORIG",
+        1: "ORIG_CW",
+        2: "OVER",
+        3: "UNDER"
+    }
+    model_choices = {
+        "ORIG": "XGB",
+        "ORIG_CW": "RF",
+        "OVER": "XGB",
+        "UNDER": "XGB"
+    }
+    # --------------------------------------------------------------------------------------------------------
+
+    # Shap value generation
+    # --------------------------------------------------------------------------------------------------------
+    for group in ['pre', 'post']:
+        # Get test dataset based on group; column names already attached
+        X_test = data_dic['X_test_' + group]
+        y_test = data_dic['y_test_' + group]  # not used below; SHAP only needs X_test
+        for method_name in method_names.values():
+            print(f"{group}-{method_name}")
+            # Model selected for this group/method combination
+            model_name = model_choices[method_name]
+            model_path = f"./output/fitted_models/{group}_{method_name}_{model_name}.pkl"
+            # Load the fitted model from disk
+            with open(model_path, 'rb') as file:
+                fitted_model = pickle.load(file)
+            # Check whether we are dealing with a tree-based model vs a non-tree model (LR/SVM/MLP)
+            is_tree = model_name not in ['LR', 'SVM', 'MLP']
+            if is_tree:
+                explainer = shap.TreeExplainer(fitted_model)
+            # else:
+            #     explainer = shap.KernelExplainer(fitted_model.predict_proba, X_test[:500])
+            # Compute shap values (additivity check enabled for the final results)
+            shap_vals = explainer.shap_values(X_test, check_additivity=True)
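+            # NOTE (assumption): depending on the installed shap version, TreeExplainer may
+            # return a list of per-class arrays for multi-class models such as the RF choice,
+            # rather than a single ndarray; np.shape below reports the shape in either case.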
+            # --------------------------------------------------------------------------------------------------------
+            # Save results
+            np.save(f"./output/shap_values/{group}_{method_name}", shap_vals)
+            print(f'Shape of saved array: {np.shape(shap_vals)}')
+            # --------------------------------------------------------------------------------------------------------
\ No newline at end of file
diff --git a/explicability/shap_vals.py b/explicability/shap_vals.py
deleted file mode 100644
index c231a0f5dee10d2280e096716cfcdec1dfc4dfd2..0000000000000000000000000000000000000000
--- a/explicability/shap_vals.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Libraries
-# --------------------------------------------------------------------------------------------------------
-import pandas as pd
-import numpy as np
-import shap
-import ast
-
-from xgboost import XGBClassifier
-from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
-from sklearn.neural_network import MLPClassifier
-from sklearn.svm import SVC
-from sklearn.linear_model import LogisticRegression
-from sklearn.tree import DecisionTreeClassifier
-# --------------------------------------------------------------------------------------------------------
-
-# Reading test and training data
-# --------------------------------------------------------------------------------------------------------
-def read_data(attribute_names):
-    # Load test data
-    X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
-    y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
-    X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
-    y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)
-
-    # Load ORIGINAL training data
-    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
-    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
-    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
-    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)
-
-    # Load oversampled training data
-    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
-    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
-    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
-    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)
-
-    # Load undersampled training data
-    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
-    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
-    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
-    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)
-
-    # Type conversion needed
-    data_dic = {
-        "X_test_pre": pd.DataFrame(X_test_pre, columns=attribute_names).convert_dtypes(),
-        "y_test_pre": y_test_pre,
-        "X_test_post": pd.DataFrame(X_test_post, columns=attribute_names).convert_dtypes(),
-        "y_test_post": y_test_post,
-        "X_train_pre": pd.DataFrame(X_train_pre, columns=attribute_names).convert_dtypes(),
"y_train_pre": y_train_pre, - "X_train_post": pd.DataFrame(X_train_post, columns=attribute_names).convert_dtypes(), - "y_train_post": y_train_post, - "X_train_over_pre": pd.DataFrame(X_train_over_pre, columns=attribute_names).convert_dtypes(), - "y_train_over_pre": y_train_over_pre, - "X_train_over_post": pd.DataFrame(X_train_over_post, columns=attribute_names).convert_dtypes(), - "y_train_over_post": y_train_over_post, - "X_train_under_pre": pd.DataFrame(X_train_under_pre, columns=attribute_names).convert_dtypes(), - "y_train_under_pre": y_train_under_pre, - "X_train_under_post": pd.DataFrame(X_train_under_post, columns=attribute_names).convert_dtypes(), - "y_train_under_post": y_train_under_post, - } - return data_dic -# -------------------------------------------------------------------------------------------------------- - -# Retrieving parameters for chosen models -# -------------------------------------------------------------------------------------------------------- -def get_chosen_model(group_str, method_str, model_name): - # Read sheet corresponding to group and method with tuned models and their hyperparameters - tuned_models_df = pd.read_excel("../model_selection/output_hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}") - tuned_models_df.columns = ['Model', 'Best Parameters'] - - # Define the mapping from model abbreviations to sklearn model classes - model_mapping = { - 'DT': DecisionTreeClassifier, - 'RF': RandomForestClassifier, - 'Bagging': BaggingClassifier, - 'AB': AdaBoostClassifier, - 'XGB': XGBClassifier, - 'LR': LogisticRegression, - 'SVM': SVC, - 'MLP': MLPClassifier - } - - # Access the row for the given model name by checking the first column (index 0) - row = tuned_models_df[tuned_models_df['Model'] == model_name].iloc[0] - - # Parse the dictionary of parameters from the 'Best Parameters' column - parameters = ast.literal_eval(row['Best Parameters']) - - # Modify parameters based on model specifics or methods if necessary - if model_name == 'AB': - parameters['algorithm'] = 'SAMME' - elif model_name == 'LR': - parameters['max_iter'] = 1000 - elif model_name == 'SVM': - parameters['max_iter'] = 1000 - parameters['probability'] = True - elif model_name == "MLP": - parameters['max_iter'] = 500 - - # Add class_weight argument for cost-sensitive learning method - if 'CW' in method_str: - if model_name in ['Bagging', 'AB']: - parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced') - else: - parameters['class_weight'] = 'balanced' - - # Fetch the class of the model - model_class = model_mapping[model_name] - - # Initialize the model with the parameters - chosen_model = model_class(**parameters) - # Return if it is a tree model, for SHAP - is_tree = model_name not in ['LR', 'SVM', 'MLP'] - - return chosen_model, is_tree -# -------------------------------------------------------------------------------------------------------- - -if __name__ == "__main__": - - # Setup - # -------------------------------------------------------------------------------------------------------- - # Retrieve attribute names in order - attribute_names = list(np.load('../gen_train_data/data/output/attributes.npy', allow_pickle=True)) - # Reading data - data_dic = read_data(attribute_names) - method_names = { - 0: "ORIG", - 1: "ORIG_CW", - 2: "OVER", - 3: "UNDER" - } - model_choices = { - "ORIG": "XGB", - "ORIG_CW": "RF", - "OVER": "XGB", - "UNDER": "XGB" - } - # 
-    # --------------------------------------------------------------------------------------------------------
-
-    # Shap value generation
-    # --------------------------------------------------------------------------------------------------------
-    for i, group in enumerate(['pre', 'post']):
-        # Get test dataset based on group, add column names
-        X_test = data_dic['X_test_' + group]
-        y_test = data_dic['y_test_' + group]
-        for j, method in enumerate(['', '', 'over_', 'under_']):
-            print(f"{group}-{method_names[j]}")
-            # Get train dataset based on group and method
-            X_train = data_dic['X_train_' + method + group]
-            y_train = data_dic['y_train_' + method + group]
-            method_name = method_names[j]
-            # Get chosen tuned model for this group and method context
-            model, is_tree = get_chosen_model(group_str=group, method_str=method_name, model_name=model_choices[method_name])
-            # --------------------------------------------------------------------------------------------------------j
-            fitted_model = model.fit(X_train[:50], y_train[:50])
-            # # Check if we are dealing with a tree vs nn model
-            if is_tree:
-                explainer = shap.TreeExplainer(fitted_model)
-            # else:
-            #     explainer = shap.KernelExplainer(fitted_model.predict_proba, X_test[:500])
-            # Compute shap values
-            shap_vals = explainer.shap_values(X_test[:50], check_additivity=False) # Change to true for final results
-            # ---------------------------------------------------------------------------------------------------------
-            # Save results
-            np.save(f"./output/shap_values/{group}_{method_names[j]}", shap_vals)
-            print(f'Shape of numpy array: {shap_vals.shape}')
-            # --------------------------------------------------------------------------------------------------------
\ No newline at end of file