Commit c556b024 authored by Joaquin Torres

minor fixes

parent b7ae7c60
@@ -12,9 +12,7 @@
 import pandas as pd
 import numpy as np
 from xgboost import XGBClassifier
-from sklearn.metrics import confusion_matrix
-from sklearn.metrics import f1_score, make_scorer, precision_score, recall_score
-from sklearn.model_selection import StratifiedKFold, cross_validate
+from sklearn.model_selection import StratifiedKFold
 from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
 from sklearn.neural_network import MLPClassifier
 from sklearn.svm import SVC
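This hunk narrows the imports to tuning-only dependencies: the evaluation imports (confusion_matrix, the scorer helpers, cross_validate) are dropped and only StratifiedKFold is kept, which the tuning loop further down passes to RandomizedSearchCV as `cv`. The construction of `cv` itself sits in a collapsed part of the file; a minimal sketch of what it plausibly looks like, with n_splits and random_state as assumptions:

    # Sketch (assumption): the `cv` object consumed by RandomizedSearchCV below.
    # n_splits and random_state are illustrative values, not taken from the diff.
    from sklearn.model_selection import StratifiedKFold

    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)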
@@ -24,17 +22,11 @@ from scipy.stats import randint, uniform
 from sklearn.model_selection import RandomizedSearchCV
 # --------------------------------------------------------------------------------------------------------
-# Function to read datasets
+# Function to read training datasets
 # --------------------------------------------------------------------------------------------------------
 def read_data():
     import numpy as np
-    # Load test data
-    X_test_pre = np.load('../gen_train_data/data/output/pre/X_test_pre.npy', allow_pickle=True)
-    y_test_pre = np.load('../gen_train_data/data/output/pre/y_test_pre.npy', allow_pickle=True)
-    X_test_post = np.load('../gen_train_data/data/output/post/X_test_post.npy', allow_pickle=True)
-    y_test_post = np.load('../gen_train_data/data/output/post/y_test_post.npy', allow_pickle=True)
     # Load ORIGINAL training data
     X_train_pre = np.load('../gen_train_data/data/output/pre/X_train_pre.npy', allow_pickle=True)
     y_train_pre = np.load('../gen_train_data/data/output/pre/y_train_pre.npy', allow_pickle=True)
@@ -54,10 +46,6 @@ def read_data():
     y_train_under_post = np.load('../gen_train_data/data/output/post/y_train_under_post.npy', allow_pickle=True)
     data_dic = {
-        "X_test_pre": X_test_pre,
-        "y_test_pre": y_test_pre,
-        "X_test_post": X_test_post,
-        "y_test_post": y_test_post,
         "X_train_pre": X_train_pre,
         "y_train_pre": y_train_pre,
         "X_train_post": X_train_post,
@@ -83,28 +71,29 @@ if __name__ == "__main__":
     # Defining the models to train
     # --------------------------------------------------------------------------------------------------------
     # 1. No class weight
-    models_1 = {"DT" : DecisionTreeClassifier(),
+    models_simple = {"DT" : DecisionTreeClassifier(),
                 "RF" : RandomForestClassifier(),
                 "Bagging" : BaggingClassifier(),
                 "AB" : AdaBoostClassifier(algorithm='SAMME'),
                 "XGB": XGBClassifier(),
                 "LR" : LogisticRegression(max_iter=1000),
-                # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet'),
                 "SVM" : SVC(probability=True),
                 "MLP" : MLPClassifier(max_iter=500)
+                # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet')
                 }
     # 2. Class weight: cost-sensitive learning
-    models_2 = {"DT" : DecisionTreeClassifier(class_weight='balanced'),
+    models_CS = {"DT" : DecisionTreeClassifier(class_weight='balanced'),
                 "RF" : RandomForestClassifier(class_weight='balanced'),
                 "Bagging" : BaggingClassifier(estimator= DecisionTreeClassifier(class_weight='balanced')),
                 "AB" : AdaBoostClassifier(estimator= DecisionTreeClassifier(class_weight='balanced'), algorithm='SAMME'),
-                # "XGB": XGBClassifier(), # <-
                 "LR" : LogisticRegression(max_iter=1000, class_weight='balanced'),
-                # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet', class_weight='balanced'),
                 "SVM" : SVC(probability=True, class_weight='balanced'),
+                # "ElNet" : LogisticRegression(max_iter=1000, penalty='elasticnet', class_weight='balanced'),
+                # "XGB": XGBClassifier(), # <-
                 # "MLP" : MLPClassifier(max_iter=500) # <-
                 }
+    # --------------------------------------------------------------------------------------------------------
     # Hyperparameter tuning setup
     # --------------------------------------------------------------------------------------------------------
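In the cost-sensitive dictionary, class_weight='balanced' reweights each class inversely to its frequency: n_samples / (n_classes * bincount(y)). XGBClassifier and MLPClassifier stay commented out there, plausibly because neither accepts a class_weight argument (XGBoost exposes scale_pos_weight instead). A minimal sketch of the weighting rule, computed explicitly on toy data:

    # Sketch: the per-class weights behind class_weight='balanced' (toy data).
    import numpy as np
    from sklearn.utils.class_weight import compute_class_weight

    y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])             # 80/20 imbalance
    w = compute_class_weight('balanced', classes=np.array([0, 1]), y=y)
    print(w)  # [0.625 2.5] = 10/(2*8), 10/(2*2) -> minority class weighted up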
...@@ -126,12 +115,12 @@ if __name__ == "__main__": ...@@ -126,12 +115,12 @@ if __name__ == "__main__":
'learning_rate': uniform(0.01, 0.3)}, 'learning_rate': uniform(0.01, 0.3)},
"LR": {'penalty': ['l1', 'l2', 'elasticnet', None], "LR": {'penalty': ['l1', 'l2', 'elasticnet', None],
'solver': ['lbfgs', 'sag', 'saga']}, 'solver': ['lbfgs', 'sag', 'saga']},
# "ElNet": {'solver': ['lbfgs', 'sag', 'saga']},
"SVM": {'C': uniform(0.8, 1.2), "SVM": {'C': uniform(0.8, 1.2),
'kernel': ['linear', 'poly', 'rbf', 'sigmoid']}, 'kernel': ['linear', 'poly', 'rbf', 'sigmoid']},
"MLP": {'activation': ['identity', 'logistic', 'tanh', 'relu'], "MLP": {'activation': ['identity', 'logistic', 'tanh', 'relu'],
'hidden_layer_sizes': randint(50, 150), 'hidden_layer_sizes': randint(50, 150),
'learning_rate': ['constant', 'invscaling', 'adaptive']} 'learning_rate': ['constant', 'invscaling', 'adaptive']}
# "ElNet": {'solver': ['lbfgs', 'sag', 'saga']},
} }
# -------------------------------------------------------------------------------------------------------- # --------------------------------------------------------------------------------------------------------
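A reading note on these search spaces: scipy.stats distributions are parameterized as (loc, scale), not (low, high). So uniform(0.8, 1.2) samples C from [0.8, 2.0] and uniform(0.01, 0.3) samples learning_rate from [0.01, 0.31], while randint(50, 150) draws integers 50 through 149 (upper bound exclusive). A quick check:

    # Sketch: what RandomizedSearchCV actually samples from these distributions.
    from scipy.stats import randint, uniform

    print(uniform(0.8, 1.2).ppf([0.0, 1.0]))  # [0.8 2.0] -> interval is [loc, loc+scale]
    print(randint(50, 150).support())         # (50, 149) -> integers 50..149 inclusive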
@@ -151,20 +140,20 @@ if __name__ == "__main__":
     # --------------------------------------------------------------------------------------------------------
     # Store each df as a sheet in an Excel file
     sheets_dict = {}
-    for i, group in enumerate(['pre', 'post']):
+    for i, group in enumerate(['pre']):
         for j, method in enumerate(['', '', 'over_', 'under_']):
             # Get dataset based on group and method
             X = data_dic['X_train_' + method + group]
             y = data_dic['y_train_' + method + group]
             # Use group of models with class weight if needed
-            models = models_2 if j == 2 else models_1
+            models = models_CS if j == 2 else models_simple
             # Save results: params and best score for each of the models of this method and group
             hyperparam_df = pd.DataFrame(index=list(models.keys()), columns=['Parameters','Score'])
             for model_name, model in models.items():
                 print(f"{group}-{method}-{model_name} \n\n")
                 # Find optimal hyperparams for curr model
                 params = hyperparameters[model_name]
-                search = RandomizedSearchCV(model, param_distributions=params, cv=cv, n_jobs=1, scoring='precision')
+                search = RandomizedSearchCV(model, param_distributions=params, cv=cv, n_jobs=3, scoring='precision')
                 search.fit(X,y)
                 hyperparam_df.at[model_name,'Parameters']=search.best_params_
                 hyperparam_df.at[model_name,'Score']=round(search.best_score_,4)
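The remainder of the loop (naming each sheet and writing sheets_dict out) is collapsed in this view. For orientation, a minimal sketch of how a dict of DataFrames is commonly flushed to a single Excel workbook, one sheet per key; the filename is hypothetical, not taken from the commit:

    # Sketch (assumption): persisting sheets_dict after the loop finishes.
    # 'hyperparam_results.xlsx' is an illustrative name, not from the diff.
    import pandas as pd

    with pd.ExcelWriter('hyperparam_results.xlsx') as writer:
        for sheet_name, df in sheets_dict.items():
            df.to_excel(writer, sheet_name=sheet_name)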