diff --git a/model_selection/cv_metric_gen.py b/model_selection/cv_metric_gen.py
index 16c48bb1085556f0f60cd0abb37416a34f8523ca..e48b8a9fc7992fa39035960725f70b04f69520a4 100644
--- a/model_selection/cv_metric_gen.py
+++ b/model_selection/cv_metric_gen.py
@@ -272,15 +272,14 @@ if __name__ == "__main__":
                 # Store the fold scores in the dataframe
                 for metric_name, scores in fold_scores.items():
                     scores_df.loc[f"{model_name}_{metric_name}"] = np.around(scores, 4)
-
             sheet_name = f"{group}_{method_names[j]}"
             scores_sheets[sheet_name] = scores_df
             # Adjust layout and save figure
             plt.tight_layout()
-            plt.savefig(f'./output_cv_metrics/curves/{group}_{method_names[j]}.svg', format='svg', dpi=500)
+            plt.savefig(f'./output/cv_metrics/curves/{group}_{method_names[j]}.svg', format='svg', dpi=500)
             plt.close(fig)
     # Write results to Excel file
-    with pd.ExcelWriter('./output_cv_metrics/metrics.xlsx') as writer:
+    with pd.ExcelWriter('./output/cv_metrics/metrics.xlsx') as writer:
         for sheet_name, data in scores_sheets.items():
             data.to_excel(writer, sheet_name=sheet_name)
     print("Successful cv metric generation for tuned models")
diff --git a/model_selection/cv_metrics_distr.py b/model_selection/cv_metrics_distr.py
index 34dfc33aee4bc40b1e206d0b8844554390f6a135..845eb0fc1f2a161c28503f11c02ae9da3e576781 100644
--- a/model_selection/cv_metrics_distr.py
+++ b/model_selection/cv_metrics_distr.py
@@ -44,7 +44,7 @@ if __name__ == "__main__":
             if metric_name in ['F1', 'PREC', 'REC', 'ACC', 'AUROC', 'AUPRC']:
                 ax.set_ylim(0, 1)
         plt.tight_layout()
-        fig.savefig(f'./output_cv_metrics/distributions/{group}{method}.svg', format='svg', dpi=600)
+        fig.savefig(f'./output/cv_metrics/distributions/{group}{method}.svg', format='svg', dpi=600)
         plt.close(fig)  # Close the figure to free up memory
 
     print("Succesful distribution plots generation")
diff --git a/model_selection/hyperparam_tuning.py b/model_selection/hyperparam_tuning.py
index d98dfb0b4cfb2b16ead80b219334d2e4e608201a..2d6bf194f2da311057bce8bd3da1b01ec9aa5367 100644
--- a/model_selection/hyperparam_tuning.py
+++ b/model_selection/hyperparam_tuning.py
@@ -159,7 +159,7 @@ if __name__ == "__main__":
         sheets_dict[sheet_name] = hyperparam_df
 
     # Write results to Excel file
-    with pd.ExcelWriter('./output_hyperparam/hyperparamers.xlsx') as writer:
+    with pd.ExcelWriter('./output/hyperparam/hyperparamers.xlsx') as writer:
         for sheet_name, data in sheets_dict.items():
             data.to_excel(writer, sheet_name=sheet_name)
 
diff --git a/model_selection/test_models.py b/model_selection/test_models.py
index 8ab1b46a3a2cb03e4367de38fe8ad45d98937ff2..3652f295f9e42d44b2e8b783342f15693a43d03a 100644
--- a/model_selection/test_models.py
+++ b/model_selection/test_models.py
@@ -77,7 +77,7 @@ def read_data():
 
 def get_tuned_models(group_str, method_str):
     # Read sheet corresponding to group and method with tuned models and their hyperparam
-    tuned_models_df = pd.read_excel("./output_hyperparam/hyperparamers.xlsx",sheet_name=f"{group_str}_{method_str}")
+    tuned_models_df = pd.read_excel("./output/hyperparam/hyperparamers.xlsx",sheet_name=f"{group_str}_{method_str}")
     # Mapping from model abbreviations to sklearn model classes
     model_mapping = {
         'DT': DecisionTreeClassifier,
@@ -274,13 +274,13 @@ if __name__ == "__main__":
             # ----------------------------------------------------------
             # Adjust layout and save/show figure
             plt.tight_layout()
-            plt.savefig(f'./output_test/plots/{group}_{method_names[j]}.svg', format='svg', dpi=500)
+            plt.savefig(f'./output/testing/plots/{group}_{method_names[j]}.svg', format='svg', dpi=500)
             plt.close(fig)
             # Store the DataFrame in the dictionary with a unique key for each sheet
             sheet_name = f"{group}_{method_names[j]}"
             scores_sheets[sheet_name] = scores_df
     # Write results to Excel file
-    with pd.ExcelWriter('./output_test/testing_tuned_models.xlsx') as writer:
+    with pd.ExcelWriter('./output/testing/testing_tuned_models.xlsx') as writer:
         for sheet_name, data in scores_sheets.items():
             data.to_excel(writer, sheet_name=sheet_name)
     print("Successful evaluation with test dataset")
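
Note on applying this patch: the scripts now write into nested ./output/... subdirectories, but no hunk above creates those directories, so plt.savefig and pd.ExcelWriter will raise FileNotFoundError when the tree is absent. A minimal sketch of a guard that could run near the top of each script's __main__ block; the directory list simply mirrors the paths touched by this patch and is an assumption, not code taken from the repository:

import os

# Assumed helper, not part of the patch: build the nested output tree up front
# so that later savefig/ExcelWriter calls find their target directories.
for subdir in ('cv_metrics/curves', 'cv_metrics/distributions', 'hyperparam', 'testing/plots'):
    os.makedirs(os.path.join('./output', subdir), exist_ok=True)  # no-op when already present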