diff --git a/model_selection/cv_metric_gen.py b/model_selection/cv_metric_gen.py
index 7bd4c3cacebe41c12d1043d1e312811cc5e5c505..16c48bb1085556f0f60cd0abb37416a34f8523ca 100644
--- a/model_selection/cv_metric_gen.py
+++ b/model_selection/cv_metric_gen.py
@@ -113,26 +113,25 @@ def TN_scorer(clf, X, y):
     """Gives the number of samples predicted as true negatives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    TN = cm[0,0]
-    return TN
+    return cm[0, 0]
+
 def FN_scorer(clf, X, y):
     """Gives the number of samples predicted as false negatives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    FN = cm[0,1]
-    return FN
+    return cm[1, 0]
+
 def FP_scorer(clf, X, y):
-    """Gives the number of samples predicted as false positive"""
+    """Gives the number of samples predicted as false positives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    FP = cm[1,0]
-    return FP
+    return cm[0, 1]
+
 def TP_scorer(clf, X, y):
-    """Gives the number of samples predicted as true positive"""
+    """Gives the number of samples predicted as true positives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    TP = cm[1,1]
-    return TP
+    return cm[1, 1]
 
 def negative_recall_scorer(clf, X, y):
     """Gives the negative recall defined as the (number of true_negative_samples)/(total number of negative samples)"""
diff --git a/model_selection/test_models.py b/model_selection/test_models.py
index 2cf3f1e32ca8c93b4ae6850300f98f3ddb95df07..8ab1b46a3a2cb03e4367de38fe8ad45d98937ff2 100644
--- a/model_selection/test_models.py
+++ b/model_selection/test_models.py
@@ -124,26 +124,25 @@ def TN_scorer(clf, X, y):
     """Gives the number of samples predicted as true negatives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    TN = cm[0,0]
-    return TN
+    return cm[0, 0]
+
 def FN_scorer(clf, X, y):
     """Gives the number of samples predicted as false negatives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    FN = cm[0,1]
-    return FN
+    return cm[1, 0]
+
 def FP_scorer(clf, X, y):
-    """Gives the number of samples predicted as false positive"""
+    """Gives the number of samples predicted as false positives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    FP = cm[1,0]
-    return FP
+    return cm[0, 1]
+
 def TP_scorer(clf, X, y):
-    """Gives the number of samples predicted as true positive"""
+    """Gives the number of samples predicted as true positives"""
     y_pred = clf.predict(X)
     cm = confusion_matrix(y, y_pred)
-    TP = cm[1,1]
-    return TP
+    return cm[1, 1]
 
 def negative_recall_scorer(clf, X, y):
     """Gives the negative recall defined as the (number of true_negative_samples)/(total number of negative samples)"""
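Note on the FN/FP index swap above: with scikit-learn's `confusion_matrix(y_true, y_pred)`, entry `cm[i, j]` counts samples whose true label is `i` and whose predicted label is `j`, so for binary labels `cm[1, 0]` is the false-negative count and `cm[0, 1]` is the false-positive count; the old code had the two reversed. A minimal standalone sanity check of that layout (not part of either patched file):

```python
# Verify sklearn's confusion_matrix layout for binary labels {0, 1}:
#   cm[0, 0] = TN, cm[0, 1] = FP, cm[1, 0] = FN, cm[1, 1] = TP
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 0, 1, 1]  # 2 false positives, 1 false negative

cm = confusion_matrix(y_true, y_pred)
assert cm[0, 1] == 2  # false positives: true 0, predicted 1
assert cm[1, 0] == 1  # false negatives: true 1, predicted 0
```

Before this patch, FN_scorer and FP_scorer were each reporting the other's count; TN_scorer and TP_scorer were already correct.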