Segment prediction labels normalization fix (#10205)
* normalize_segments

* round remove

* swap axes fix
glenn-jocher committed Nov 18, 2022
1 parent ff6e6e3 commit 467a57f
Showing 2 changed files with 11 additions and 8 deletions.
segment/predict.py (2 changes: 1 addition, 1 deletion)
@@ -156,7 +156,7 @@ def run(
                 # Segments
                 if save_txt:
                     segments = reversed(masks2segments(masks))
-                    segments = [scale_segments(im.shape[2:], x, im0.shape).round() for x in segments]
+                    segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments]

                 # Print results
                 for c in det[:, 5].unique():
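Note: with normalize=True the saved segment coordinates are fractions of the original image size rather than rounded pixel values. A minimal sketch of how such normalized segments could be written out as YOLO-style label lines follows; the write_segment_labels helper and its file handling are illustrative assumptions, not part of this commit.

import numpy as np

def write_segment_labels(txt_path, classes, segments):
    # segments: list of (N, 2) arrays with x, y already normalized to [0, 1]
    # classes: per-instance class indices, one per segment
    with open(f'{txt_path}.txt', 'a') as f:
        for cls, seg in zip(classes, segments):
            coords = np.asarray(seg).reshape(-1)  # flatten to x1 y1 x2 y2 ...
            line = (int(cls), *coords.tolist())
            f.write(('%g ' * len(line)).rstrip() % line + '\n')

Each line then reads "class x1 y1 x2 y2 ...", matching the normalized polygon label format used for segmentation datasets.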
utils/general.py (17 changes: 10 additions, 7 deletions)
Expand Up @@ -822,7 +822,7 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
return boxes


def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None):
def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
Expand All @@ -835,6 +835,9 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None):
segments[:, 1] -= pad[1] # y padding
segments /= gain
clip_segments(segments, img0_shape)
if normalize:
segments[:, 0] /= img0_shape[1] # width
segments[:, 1] /= img0_shape[0] # height
return segments
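A quick usage sketch of the new flag (the shapes and polygon below are made-up values; the snippet assumes it runs from a YOLOv5 checkout so that utils.general is importable):

import numpy as np
from utils.general import scale_segments

img1_shape = (384, 640)   # letterboxed inference size (h, w)
img0_shape = (720, 1280)  # original image size (h, w)

# toy polygon in inference-image pixel coordinates, shape (N, 2) as (x, y)
segment = np.array([[100.0, 50.0], [200.0, 50.0], [200.0, 150.0]], dtype=np.float32)

pixels = scale_segments(img1_shape, segment.copy(), img0_shape)                  # im0 pixel coords
normed = scale_segments(img1_shape, segment.copy(), img0_shape, normalize=True)  # fractions in [0, 1]
print(pixels[0], normed[0])  # -> approx. [200. 76.] and [0.15625 0.10556]

Note that scale_segments modifies its input array in place, hence the copies above.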


@@ -850,14 +853,14 @@ def clip_boxes(boxes, shape):
         boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2


-def clip_segments(boxes, shape):
+def clip_segments(segments, shape):
     # Clip segments (xy1,xy2,...) to image shape (height, width)
-    if isinstance(boxes, torch.Tensor):  # faster individually
-        boxes[:, 0].clamp_(0, shape[1])  # x
-        boxes[:, 1].clamp_(0, shape[0])  # y
+    if isinstance(segments, torch.Tensor):  # faster individually
+        segments[:, 0].clamp_(0, shape[1])  # x
+        segments[:, 1].clamp_(0, shape[0])  # y
     else:  # np.array (faster grouped)
-        boxes[:, 0] = boxes[:, 0].clip(0, shape[1])  # x
-        boxes[:, 1] = boxes[:, 1].clip(0, shape[0])  # y
+        segments[:, 0] = segments[:, 0].clip(0, shape[1])  # x
+        segments[:, 1] = segments[:, 1].clip(0, shape[0])  # y


 def non_max_suppression(
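The clip_segments change above is only a parameter rename (boxes -> segments) for readability; behaviour is unchanged. A tiny illustrative check, with made-up points, again assuming a YOLOv5 checkout:

import numpy as np
from utils.general import clip_segments

shape = (480, 640)  # (height, width)
seg = np.array([[-5.0, 10.0], [700.0, 500.0]], dtype=np.float32)  # points outside the image
clip_segments(seg, shape)  # clips in place: x to [0, 640], y to [0, 480]
print(seg)  # [[  0.  10.] [640. 480.]]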
