diff --git a/utils/metrics.py b/utils/metrics.py
index f97ed3268d60..3af8f9552478 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -38,7 +38,6 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision

     # Create Precision-Recall curve and compute AP for each class
     px, py = np.linspace(0, 1, 1000), []  # for plotting
-    # pr_score = np.linspace(0, 1, 100)  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
     s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
     ap, p, r = np.zeros(s), np.zeros(s + [1000]), np.zeros(s + [1000])
     for ci, c in enumerate(unique_classes):
@@ -210,9 +209,9 @@ def plot_mc_curve(px, py, save_dir='.', names=(), xlabel='Confidence', ylabel='M

     if 0 < len(names) < 21:  # display per-class legend if < 21 classes
         for i, y in enumerate(py.T):
-            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(recall, precision)
+            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
     else:
-        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)
+        ax.plot(px, py, linewidth=1, color='grey')  # plot(confidence, metric)

     y = py.mean(1)
     ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
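
For context on the comment fix: in `plot_mc_curve` the x-axis `px` is a confidence grid and each column of `py` is a per-class metric curve (e.g. F1, precision, or recall versus confidence), so the calls plot metric against confidence rather than precision against recall as the old comments claimed. Below is a minimal standalone sketch of such a plot, not the ultralytics implementation; the array shapes mirror the diff, while the synthetic data, class names, and output filename are assumptions made for this demo.

```python
# Minimal sketch of a metric-confidence curve matching the shapes used in the diff.
# px: (1000,) confidence grid; py: (1000, nc) per-class metric values.
# The synthetic curves, class names, and output path are assumptions for this demo.
import numpy as np
import matplotlib.pyplot as plt

px = np.linspace(0, 1, 1000)                                                  # confidence axis
py = np.stack([np.exp(-(px - m) ** 2 / 0.05) for m in (0.3, 0.5, 0.7)], 1)    # fake F1-like curves
names = ['person', 'car', 'dog']                                              # hypothetical class names

fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
for i, y in enumerate(py.T):
    ax.plot(px, y, linewidth=1, label=f'{names[i]}')                          # plot(confidence, metric)

y = py.mean(1)                                                                # mean metric across classes
ax.plot(px, y, linewidth=3, color='blue',
        label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
ax.set_xlabel('Confidence')
ax.set_ylabel('Metric')
ax.legend()
fig.savefig('mc_curve_demo.png', dpi=250)
```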