Commit 19e2acc1 authored by Joaquin Torres

Script ready to fit final models

parent 79cfdb2c
# Libraries
# --------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import shap
import ast
import matplotlib.pyplot as plt
import pickle
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# --------------------------------------------------------------------------------------------------------
# Reading training data
# --------------------------------------------------------------------------------------------------------
def read_training_data(attribute_names):
    # Load ORIGINAL training data
    X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
    y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
    X_train_post = np.load('../gen_train_data/data/output/post/X_train_post.npy', allow_pickle=True)
    y_train_post = np.load('../gen_train_data/data/output/post/y_train_post.npy', allow_pickle=True)
    # Load oversampled training data
    X_train_over_pre = np.load('../gen_train_data/data/output/pre/X_train_over_pre.npy', allow_pickle=True)
    y_train_over_pre = np.load('../gen_train_data/data/output/pre/y_train_over_pre.npy', allow_pickle=True)
    X_train_over_post = np.load('../gen_train_data/data/output/post/X_train_over_post.npy', allow_pickle=True)
    y_train_over_post = np.load('../gen_train_data/data/output/post/y_train_over_post.npy', allow_pickle=True)
    # Load undersampled training data
    X_train_under_pre = np.load('../gen_train_data/data/output/pre/X_train_under_pre.npy', allow_pickle=True)
    y_train_under_pre = np.load('../gen_train_data/data/output/pre/y_train_under_pre.npy', allow_pickle=True)
    X_train_under_post = np.load('../gen_train_data/data/output/post/X_train_under_post.npy', allow_pickle=True)
    y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)
    # Type conversion needed
    data_dic = {
        "X_train_pre": pd.DataFrame(X_train_pre, columns=attribute_names).convert_dtypes(),
        "y_train_pre": y_train_pre,
        "X_train_post": pd.DataFrame(X_train_post, columns=attribute_names).convert_dtypes(),
        "y_train_post": y_train_post,
        "X_train_over_pre": pd.DataFrame(X_train_over_pre, columns=attribute_names).convert_dtypes(),
        "y_train_over_pre": y_train_over_pre,
        "X_train_over_post": pd.DataFrame(X_train_over_post, columns=attribute_names).convert_dtypes(),
        "y_train_over_post": y_train_over_post,
        "X_train_under_pre": pd.DataFrame(X_train_under_pre, columns=attribute_names).convert_dtypes(),
        "y_train_under_pre": y_train_under_pre,
        "X_train_under_post": pd.DataFrame(X_train_under_post, columns=attribute_names).convert_dtypes(),
        "y_train_under_post": y_train_under_post,
    }
    return data_dic
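# A minimal usage sketch (mirrors the __main__ block below): load the attribute
# names saved by the data-generation step, build the dictionary, and inspect one
# of the converted frames.
#   attribute_names = list(np.load('../gen_train_data/data/output/attributes.npy', allow_pickle=True))
#   data_dic = read_training_data(attribute_names)
#   print(data_dic['X_train_over_pre'].dtypes)  # dtypes inferred by convert_dtypes()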
# --------------------------------------------------------------------------------------------------------
# Initializing chosen models from hyperparameters file
# --------------------------------------------------------------------------------------------------------
def get_chosen_model(group_str, method_str, model_name):
    # Read the sheet for this group and method, containing the tuned models and their hyperparameters
    tuned_models_df = pd.read_excel("../model_selection/output_hyperparam/hyperparamers.xlsx", sheet_name=f"{group_str}_{method_str}")
    tuned_models_df.columns = ['Model', 'Best Parameters']
    # Map model abbreviations to sklearn/xgboost model classes
    model_mapping = {
        'DT': DecisionTreeClassifier,
        'RF': RandomForestClassifier,
        'Bagging': BaggingClassifier,
        'AB': AdaBoostClassifier,
        'XGB': XGBClassifier,
        'LR': LogisticRegression,
        'SVM': SVC,
        'MLP': MLPClassifier
    }
    # Look up the row for the given model name in the 'Model' column
    row = tuned_models_df[tuned_models_df['Model'] == model_name].iloc[0]
    # Parse the dictionary of parameters from the 'Best Parameters' column
    parameters = ast.literal_eval(row['Best Parameters'])
    # Adjust parameters for specific models
    if model_name == 'AB':
        parameters['algorithm'] = 'SAMME'
    elif model_name == 'LR':
        parameters['max_iter'] = 1000
    elif model_name == 'SVM':
        parameters['max_iter'] = 1000
        parameters['probability'] = True
    elif model_name == "MLP":
        parameters['max_iter'] = 500
    # Add class_weight argument for the cost-sensitive learning method
    if 'CW' in method_str:
        if model_name in ['Bagging', 'AB']:
            parameters['estimator'] = DecisionTreeClassifier(class_weight='balanced')
        else:
            parameters['class_weight'] = 'balanced'
    # Fetch the model class and initialize it with the parameters
    model_class = model_mapping[model_name]
    chosen_model = model_class(**parameters)
    # Also report whether it is a tree-based model, so SHAP can pick the right explainer
    is_tree = model_name not in ['LR', 'SVM', 'MLP']
    return chosen_model, is_tree
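# Illustration (hypothetical cell contents): a 'Best Parameters' cell like
# "{'n_estimators': 300, 'max_depth': 6}" is parsed by ast.literal_eval into a
# plain dict, adjusted by the rules above, and unpacked into the constructor:
#   model, is_tree = get_chosen_model(group_str='pre', method_str='ORIG', model_name='XGB')
#   # -> XGBClassifier(n_estimators=300, max_depth=6, ...), is_tree == True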
# --------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Setup
    # --------------------------------------------------------------------------------------------------------
    # Retrieve attribute names in order
    attribute_names = list(np.load('../gen_train_data/data/output/attributes.npy', allow_pickle=True))
    # Reading data
    data_dic = read_training_data(attribute_names)
    method_names = {
        0: "ORIG",
        1: "ORIG_CW",
        2: "OVER",
        3: "UNDER"
    }
    model_choices = {
        "ORIG": "XGB",
        "ORIG_CW": "RF",
        "OVER": "XGB",
        "UNDER": "XGB"
    }
    # --------------------------------------------------------------------------------------------------------
    # Fitting final models with the whole training dataset
    # --------------------------------------------------------------------------------------------------------
    for group in ['pre', 'post']:
        # '' appears twice: ORIG and ORIG_CW both train on the original (unresampled) data
        for j, method in enumerate(['', '', 'over_', 'under_']):
            method_name = method_names[j]
            print(f"{group}-{method_name}")
            # Get train dataset based on group and method
            X_train = data_dic['X_train_' + method + group]
            y_train = data_dic['y_train_' + method + group]
            # Get chosen tuned model for this group and method context
            model, is_tree = get_chosen_model(group_str=group, method_str=method_name, model_name=model_choices[method_name])
            fitted_model = model.fit(X_train, y_train)
            # Save the fitted model to disk
            model_save_path = f"./output/fitted_models/{group}_{method_name}_{model_choices[method_name]}.pkl"
            with open(model_save_path, 'wb') as f:
                pickle.dump(fitted_model, f)
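            # Minimal SHAP sketch (an assumption about the intended use of the shap
            # and matplotlib imports above, not part of this commit's fitting step):
            # tree models get TreeExplainer, the rest fall back to KernelExplainer.
            RUN_SHAP_SKETCH = False  # hypothetical flag; flip to True to try it
            if RUN_SHAP_SKETCH:
                if is_tree:
                    explainer = shap.TreeExplainer(fitted_model)
                else:
                    # Model-agnostic but slow; keep the background sample small
                    explainer = shap.KernelExplainer(fitted_model.predict_proba, shap.sample(X_train, 100))
                shap_values = explainer.shap_values(X_train)
                shap.summary_plot(shap_values, X_train, show=False)
                plt.savefig(f"./output/shap_{group}_{method_name}.png")  # hypothetical output path
                plt.close()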