""" Evaluating optimized models with test data """ # Libraries # -------------------------------------------------------------------------------------------------------- import pandas as pd import numpy as np from xgboost import XGBClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score, make_scorer, precision_score, recall_score, accuracy_score from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier # -------------------------------------------------------------------------------------------------------- # Reading test data # -------------------------------------------------------------------------------------------------------- def read_test_data(): # Load test data X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True) y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True) X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True) y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True) data_dic = { "X_test_pre": X_test_pre, "y_test_pre": y_test_pre, "X_test_post": X_test_post, "y_test_post": y_test_post, } return data_dic # -------------------------------------------------------------------------------------------------------- # Returning tuned models for each situation # -------------------------------------------------------------------------------------------------------- def get_tuned_models(group_id, method_id): # 1. PRE if group_id == 0: # 1.1) Trained with original dataset if method_id == 0: tuned_models = { "DT" : DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}), "RF" : RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 117}), "Bagging" : BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 23, 'warm_start': True}), "AB" : AdaBoostClassifier(**{'learning_rate': 1.9189147333140566, 'n_estimators': 131, 'algorithm': 'SAMME'}), "XGB": XGBClassifier(**{'learning_rate': 0.22870029177880222, 'max_depth': 8, 'n_estimators': 909}), "LR" : LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}), "SVM" : SVC(**{'C': 0.9872682949695772, 'kernel': 'linear', 'max_iter':1000}), "MLP" : MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 122, 'learning_rate': 'invscaling', 'max_iter':500}) } # 1.2) Trained with original dataset and cost-sensitive learning elif method_id == 1: tuned_models = { "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'entropy', 'class_weight': 'balanced'}), "RF": RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 118, 'class_weight': 'balanced'}), "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 15, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}), "AB": AdaBoostClassifier(**{'learning_rate': 0.8159074545140872, 'n_estimators': 121, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}), "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'none', 'max_iter': 1000, 'class_weight': 'balanced'}), "SVM": SVC(**{'C': 1.5550524351360953, 'kernel': 'linear', 
# Returning tuned models for each situation
# --------------------------------------------------------------------------------------------------------
def get_tuned_models(group_id, method_id):
    """Return the tuned classifiers for a group (0=PRE, 1=POST) and training method
    (0=original, 1=cost-sensitive, 2=oversampled, 3=undersampled)."""
    # 1. PRE
    if group_id == 0:
        # 1.1) Trained with original dataset
        if method_id == 0:
            tuned_models = {
                "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}),
                "RF": RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 117}),
                "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 23, 'warm_start': True}),
                "AB": AdaBoostClassifier(**{'learning_rate': 1.9189147333140566, 'n_estimators': 131, 'algorithm': 'SAMME'}),
                "XGB": XGBClassifier(**{'learning_rate': 0.22870029177880222, 'max_depth': 8, 'n_estimators': 909}),
                "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
                "SVM": SVC(**{'C': 0.9872682949695772, 'kernel': 'linear', 'max_iter': 1000}),
                "MLP": MLPClassifier(**{'activation': 'identity', 'hidden_layer_sizes': 122, 'learning_rate': 'invscaling', 'max_iter': 500})
            }
        # 1.2) Trained with original dataset and cost-sensitive learning
        elif method_id == 1:
            # Note: no XGB or MLP configuration exists for this setting.
            tuned_models = {
                "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'entropy', 'class_weight': 'balanced'}),
                "RF": RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 118, 'class_weight': 'balanced'}),
                "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 15, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
                "AB": AdaBoostClassifier(**{'learning_rate': 0.8159074545140872, 'n_estimators': 121, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
                # penalty=None rather than the string 'none', which recent scikit-learn no longer accepts
                "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000, 'class_weight': 'balanced'}),
                "SVM": SVC(**{'C': 1.5550524351360953, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced'}),
            }
        # 1.3) Trained with oversampled training dataset
        elif method_id == 2:
            tuned_models = {
                "DT": DecisionTreeClassifier(**{'splitter': 'random', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
                "RF": RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 135}),
                "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 26, 'warm_start': True}),
                "AB": AdaBoostClassifier(**{'learning_rate': 1.6590924545876917, 'n_estimators': 141, 'algorithm': 'SAMME'}),
                "XGB": XGBClassifier(**{'learning_rate': 0.26946295284728783, 'max_depth': 7, 'n_estimators': 893}),
                "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
                "SVM": SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter': 1000}),
                "MLP": MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 116, 'learning_rate': 'invscaling', 'max_iter': 500})
            }
        # 1.4) Trained with undersampled training dataset
        elif method_id == 3:
            tuned_models = {
                "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'gini'}),
                "RF": RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 104}),
                "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 38, 'warm_start': True}),
                "AB": AdaBoostClassifier(**{'learning_rate': 1.6996764264041269, 'n_estimators': 93, 'algorithm': 'SAMME'}),
                "XGB": XGBClassifier(**{'learning_rate': 0.26480707899668926, 'max_depth': 7, 'n_estimators': 959}),
                "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
                "SVM": SVC(**{'C': 1.1996501173654208, 'kernel': 'poly', 'max_iter': 1000}),
                "MLP": MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 131, 'learning_rate': 'constant', 'max_iter': 500})
            }
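# Example usage (sketch): fetch the PRE-group models tuned on the oversampled training set
# and inspect one configuration. Identifiers are the ones defined in this module.
#   models = get_tuned_models(group_id=0, method_id=2)
#   print(models["XGB"].get_params())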
    # 2. POST
    else:
        # 2.1) Trained with original dataset
        if method_id == 0:
            tuned_models = {
                "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'gini'}),
                "RF": RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 213}),
                "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 32, 'warm_start': True}),
                "AB": AdaBoostClassifier(**{'learning_rate': 1.7806904141367559, 'n_estimators': 66, 'algorithm': 'SAMME'}),
                "XGB": XGBClassifier(**{'learning_rate': 0.21889089898592098, 'max_depth': 6, 'n_estimators': 856}),
                "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000}),
                "SVM": SVC(**{'C': 1.9890638540240584, 'kernel': 'linear', 'max_iter': 1000}),
                "MLP": MLPClassifier(**{'activation': 'logistic', 'hidden_layer_sizes': 112, 'learning_rate': 'constant', 'max_iter': 500})
            }
        # 2.2) Trained with original dataset and cost-sensitive learning
        elif method_id == 1:
            # All configurations for this setting are commented out, so the resulting
            # sheet stays empty until tuned parameters are filled in.
            tuned_models = {
                # "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'log2', 'criterion': 'entropy', 'class_weight': 'balanced'}),
                # "RF": RandomForestClassifier(**{'criterion': 'entropy', 'max_features': 'sqrt', 'n_estimators': 118, 'class_weight': 'balanced'}),
                # "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 15, 'warm_start': False, 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
                # "AB": AdaBoostClassifier(**{'learning_rate': 0.8159074545140872, 'n_estimators': 121, 'algorithm': 'SAMME', 'estimator': DecisionTreeClassifier(class_weight='balanced')}),
                # "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': None, 'max_iter': 1000, 'class_weight': 'balanced'}),
                # "SVM": SVC(**{'C': 1.5550524351360953, 'kernel': 'linear', 'max_iter': 1000, 'class_weight': 'balanced'}),
            }
        # 2.3) Trained with oversampled training dataset
        elif method_id == 2:
            # Likewise fully commented out; the resulting sheet stays empty.
            tuned_models = {
                # "DT": DecisionTreeClassifier(**{'splitter': 'random', 'max_features': 'sqrt', 'criterion': 'log_loss'}),
                # "RF": RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 135}),
                # "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 1.0, 'n_estimators': 26, 'warm_start': True}),
                # "AB": AdaBoostClassifier(**{'learning_rate': 1.6590924545876917, 'n_estimators': 141, 'algorithm': 'SAMME'}),
                # "XGB": XGBClassifier(**{'learning_rate': 0.26946295284728783, 'max_depth': 7, 'n_estimators': 893}),
                # "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
                # "SVM": SVC(**{'C': 1.676419306008229, 'kernel': 'poly', 'max_iter': 1000}),
                # "MLP": MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 116, 'learning_rate': 'invscaling', 'max_iter': 500})
            }
        # 2.4) Trained with undersampled training dataset
        elif method_id == 3:
            tuned_models = {
                "DT": DecisionTreeClassifier(**{'splitter': 'best', 'max_features': 'sqrt', 'criterion': 'entropy'}),
                "RF": RandomForestClassifier(**{'criterion': 'gini', 'max_features': 'sqrt', 'n_estimators': 224}),
                "Bagging": BaggingClassifier(**{'max_features': 1.0, 'max_samples': 0.8, 'n_estimators': 13, 'warm_start': True}),
                "AB": AdaBoostClassifier(**{'learning_rate': 1.836659462701278, 'n_estimators': 138, 'algorithm': 'SAMME'}),
                "XGB": XGBClassifier(**{'learning_rate': 0.2517946893282251, 'max_depth': 4, 'n_estimators': 646}),
                "LR": LogisticRegression(**{'solver': 'lbfgs', 'penalty': 'l2', 'max_iter': 1000}),
                "SVM": SVC(**{'C': 1.8414678085000697, 'kernel': 'linear', 'max_iter': 1000}),
                "MLP": MLPClassifier(**{'activation': 'relu', 'hidden_layer_sizes': 76, 'learning_rate': 'constant', 'max_iter': 500})
            }
    return tuned_models
# --------------------------------------------------------------------------------------------------------

# Scorers
# --------------------------------------------------------------------------------------------------------
# scikit-learn's confusion_matrix puts true labels on rows and predictions on columns,
# so for binary 0/1 labels: cm[0,0]=TN, cm[0,1]=FP, cm[1,0]=FN, cm[1,1]=TP.
def TN_scorer(clf, X, y):
    """Gives the number of samples predicted as true negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN = cm[0, 0]
    return TN

def FN_scorer(clf, X, y):
    """Gives the number of samples predicted as false negatives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FN = cm[1, 0]
    return FN

def FP_scorer(clf, X, y):
    """Gives the number of samples predicted as false positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    FP = cm[0, 1]
    return FP

def TP_scorer(clf, X, y):
    """Gives the number of samples predicted as true positives"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TP = cm[1, 1]
    return TP

def negative_recall_scorer(clf, X, y):
    """Gives the negative recall, defined as (number of true negatives) / (total number of negative samples)"""
    y_pred = clf.predict(X)
    cm = confusion_matrix(y, y_pred)
    TN_prop = cm[0, 0] / (cm[0, 1] + cm[0, 0])
    return TN_prop
# --------------------------------------------------------------------------------------------------------
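# Sketch for the AUROC / AUPRC item noted in the scorings setup below: threshold-free
# metrics need continuous scores rather than hard predictions. decision_function is
# preferred when available (e.g. SVC is configured without probability=True above);
# treating its output as a ranking score is an assumption of this sketch.
def AUROC_scorer(clf, X, y):
    """Area under the ROC curve computed from continuous classifier scores."""
    from sklearn.metrics import roc_auc_score
    y_score = clf.decision_function(X) if hasattr(clf, "decision_function") else clf.predict_proba(X)[:, 1]
    return roc_auc_score(y, y_score)

def AUPRC_scorer(clf, X, y):
    """Area under the precision-recall curve (average precision) from continuous scores."""
    from sklearn.metrics import average_precision_score
    y_score = clf.decision_function(X) if hasattr(clf, "decision_function") else clf.predict_proba(X)[:, 1]
    return average_precision_score(y, y_score)
# --------------------------------------------------------------------------------------------------------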
if __name__ == "__main__":
    # Reading testing data
    data_dic = read_test_data()

    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Scorings to use for model evaluation
    scorings = {
        'F1': make_scorer(f1_score),
        'NREC': negative_recall_scorer,
        'REC': make_scorer(recall_score),
        'PREC': make_scorer(precision_score),
        'ACC': make_scorer(accuracy_score),
        'TN': TN_scorer,
        'FN': FN_scorer,
        'FP': FP_scorer,
        'TP': TP_scorer
    }  # TODO: AUROC and AUPRC (plot?) -- see the AUROC_scorer / AUPRC_scorer sketch above
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    # --------------------------------------------------------------------------------------------------------

    # Evaluating performance using test dataset
    # --------------------------------------------------------------------------------------------------------
    scores_sheets = {}  # To store score dfs as sheets in the same excel file
    for i, group in enumerate(['pre', 'post']):
        # Get test dataset based on group
        X = data_dic['X_test_' + group]
        y = data_dic['y_test_' + group]
        # ORIG and ORIG_CW both train on the unmodified dataset, hence the repeated ''
        # prefix; 'over_' and 'under_' select the resampled training sets.
        for j, method in enumerate(['', '', 'over_', 'under_']):
            # Get tuned models for this group and method
            models = get_tuned_models(group_id=i, method_id=j)
            # The estimators above are unfitted, so they must be trained before scoring.
            # NOTE: these training-set paths mirror the test-set layout with the `method`
            # prefix and are an assumption; adjust them to the actual file layout.
            X_train = np.load(f'../gen_train_data/data/output/{group}/{method}X_train_{group}.npy', allow_pickle=True)
            y_train = np.load(f'../gen_train_data/data/output/{group}/{method}y_train_{group}.npy', allow_pickle=True)
            # Scores df
            scores_df = pd.DataFrame(index=models.keys(), columns=scorings.keys())
            # Evaluate each model
            for model_name, model in models.items():
                model.fit(X_train, y_train)
                # At each of the scores of interest
                for score_name, scorer in scorings.items():
                    score_value = scorer(model, X, y)
                    scores_df.at[model_name, score_name] = score_value
            # Store the DataFrame in the dictionary with a unique key for each sheet
            sheet_name = f"{group}_{method_names[j]}"
            scores_sheets[sheet_name] = scores_df
    # Write results to Excel file
    with pd.ExcelWriter('./training_models/output/testing_tuned_models.xlsx') as writer:
        for sheet_name, data in scores_sheets.items():
            data.to_excel(writer, sheet_name=sheet_name)
# --------------------------------------------------------------------------------------------------------