# Libraries
# --------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import shap
import ast
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# --------------------------------------------------------------------------------------------------------

# Reading test and training data
# --------------------------------------------------------------------------------------------------------
def read_data():
    # Load test data
    X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
    y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
    X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
    y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)

    # Load ORIGINAL training data
    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)

    # Load oversampled training data
    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)

    # Load undersampled training data
    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)

    data_dic = {
        "X_test_pre": X_test_pre, "y_test_pre": y_test_pre,
        "X_test_post": X_test_post, "y_test_post": y_test_post,
        "X_train_pre": X_train_pre, "y_train_pre": y_train_pre,
        "X_train_post": X_train_post, "y_train_post": y_train_post,
        "X_train_over_pre": X_train_over_pre, "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": X_train_over_post, "y_train_over_post": y_train_over_post,
        "X_train_under_pre": X_train_under_pre, "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": X_train_under_post, "y_train_under_post": y_train_under_post,
    }
    return data_dic
# --------------------------------------------------------------------------------------------------------
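
# Example usage (illustrative sketch; the actual array shapes depend on the generated .npy files):
#   data_dic = read_data()
#   X = data_dic["X_train_pre"]   # feature matrix, expected shape (n_samples, n_features)
#   y = data_dic["y_train_pre"]   # label vector, expected shape (n_samples,)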

# Retrieving parameters for chosen models
# --------------------------------------------------------------------------------------------------------
def get_chosen_model(group_str, method_str, model_name):
    # Read the sheet corresponding to this group and method, containing the tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("../model_selection/output_hyperparam/hyperparamers.xlsx",
                                    sheet_name=f"{group_str}_{method_str}")
    tuned_models_df.columns = ['Model', 'Best Parameters']

    # Define the mapping from model abbreviations to sklearn model classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }

    # Select the first row whose 'Model' column matches the given model name
    row = tuned_models_df[tuned_models_df['Model'] == model_name].iloc[0]
    # Parse the dictionary of parameters from the 'Best Parameters' column
    parameters = ast.literal_eval(row['Best Parameters'])

    # Modify parameters based on model specifics or methods if necessary
    if model_name == 'AB':
        parameters['algorithm'] = 'SAMME'
    elif model_name == 'LR':
        parameters['max_iter'] = 1000
    elif model_name == 'SVM':
        parameters['max_iter'] = 1000
        parameters['probability'] = True
    elif model_name == "MLP":
        parameters['max_iter'] = 500

    # Add class_weight argument for the cost-sensitive learning method
    if 'CW' in method_str:
        if model_name in ['Bagging', 'AB']:
            parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
        else:
            parameters['class_weight'] = 'balanced'

    # Fetch the class of the model
    model_class = model_mapping[model_name]
    # Initialize the model with the parameters
    chosen_model = model_class(**parameters)
    # Also flag whether the model is tree-based, so SHAP can use the faster TreeExplainer
    is_tree = model_name not in ['LR', 'SVM', 'MLP']
    return chosen_model, is_tree
# --------------------------------------------------------------------------------------------------------

if __name__ == "__main__":
    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Reading data
    data_dic = read_data()
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    model_choices = {
        "ORIG": "XGB",
        "ORIG_CW": "RF",
        "OVER": "XGB",
        "UNDER": "XGB"
    }
    # Retrieve attribute names in order
    df = pd.read_csv("../gen_train_data/data/input/pre_dataset.csv")
    attribute_names = list(df.columns.values)

    # --------------------------------------------------------------------------------------------------------
    # Shap value generation
    # --------------------------------------------------------------------------------------------------------
    for i, group in enumerate(['pre', 'post']):
        # Get test dataset based on group, add column names
        X_test = pd.DataFrame(data_dic['X_test_' + group], columns=attribute_names)
        y_test = data_dic['y_test_' + group]
        # ORIG and ORIG_CW share the same (original) training data, hence the two empty prefixes
        for j, method in enumerate(['', '', 'over_', 'under_']):
            method_name = method_names[j]
            print(f"{group}-{method_name}")
            # Get train dataset based on group and method
            X_train = pd.DataFrame(data_dic['X_train_' + method + group], columns=attribute_names)
            y_train = data_dic['y_train_' + method + group]
            # Get chosen tuned model for this group and method context
            model, is_tree = get_chosen_model(group_str=group, method_str=method_name,
                                              model_name=model_choices[method_name])
            # --------------------------------------------------------------------------------------------------------
            # Fit model on a subset of the training data (first 500 rows) to keep the run fast
            fitted_model = model.fit(X_train[:500], y_train[:500])
            # Check if we are dealing with a tree model vs. a non-tree model
            if is_tree:
                explainer = shap.TreeExplainer(fitted_model)
            # else:
            #     explainer = shap.KernelExplainer(fitted_model.predict_proba, X_test[:500])
            # Compute shap values
            shap_vals = explainer.shap_values(X_test[:500], check_additivity=False)  # Change to True for final results
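            # Note (general SHAP behavior, not specific to this script): for binary
            # classifiers, shap_values() may return a single (n_samples, n_features)
            # array or one array per class, depending on the model and SHAP version;
            # the shape printed below shows which form was produced here.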
            # --------------------------------------------------------------------------------------------------------
            # Save results
            np.save(f"./output/shap_values/{group}_{method_names[j]}", shap_vals)
            print(f'Shape of numpy array: {shap_vals.shape}')
# --------------------------------------------------------------------------------------------------------
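
# Illustrative follow-up (an assumption, not part of this script): the saved arrays
# can later be reloaded for inspection or plotting, e.g.
#   vals = np.load("./output/shap_values/pre_ORIG.npy")
#   shap.summary_plot(vals, X_test[:500], feature_names=attribute_names)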