From a76b28312838f44ef8427688f35ec2b8b035b987 Mon Sep 17 00:00:00 2001
From: MaxTeselkin
Date: Thu, 27 Apr 2023 01:39:52 +0300
Subject: [PATCH 1/2] added agnostic nms support

---
 supervisely/serve/custom_settings.yaml |   5 +-
 supervisely/serve/src/main.py          | 101 +++++++++++++++----------
 2 files changed, 65 insertions(+), 41 deletions(-)

diff --git a/supervisely/serve/custom_settings.yaml b/supervisely/serve/custom_settings.yaml
index ba7e0c2bc53c..145ec2b9bda4 100644
--- a/supervisely/serve/custom_settings.yaml
+++ b/supervisely/serve/custom_settings.yaml
@@ -8,4 +8,7 @@
 iou_thres: 0.45
 augment: False
 # save image with predictions (for developers)
-debug_visualization: False
\ No newline at end of file
+debug_visualization: False
+
+# whether to use agnostic nms or not
+agnostic_nms: False
\ No newline at end of file
diff --git a/supervisely/serve/src/main.py b/supervisely/serve/src/main.py
index 09caa686d60a..26031821ffb8 100644
--- a/supervisely/serve/src/main.py
+++ b/supervisely/serve/src/main.py
@@ -1,5 +1,6 @@
 import os
 import sys
+
 try:
     from typing import Literal
 except:
     from typing_extensions import Literal
@@ -22,11 +23,14 @@
 load_dotenv(os.path.join(app_source_path, "local.env"))
 load_dotenv(os.path.expanduser("~/supervisely.env"))
 
-model_weights_options = os.environ['modal.state.modelWeightsOptions']
-pretrained_weights = os.environ['modal.state.selectedModel'].lower()
-custom_weights = os.environ['modal.state.weightsPath']
+model_weights_options = os.environ["modal.state.modelWeightsOptions"]
+pretrained_weights = os.environ["modal.state.selectedModel"].lower()
+custom_weights = os.environ["modal.state.weightsPath"]
+
+pretrained_weights_url = (
+    f"https://github.com/ultralytics/yolov5/releases/download/v5.0/{pretrained_weights}.pt"
+)
 
-pretrained_weights_url = f"https://github.com/ultralytics/yolov5/releases/download/v5.0/{pretrained_weights}.pt"
+
 class YOLOv5Model(sly.nn.inference.ObjectDetection):
     def load_on_device(
@@ -39,27 +43,29 @@ def load_on_device(
             self.local_weights_path = self.download(pretrained_weights_url)
         if model_weights_options == "custom":
             self.local_weights_path = self.download(custom_weights)
-            cfg_path_in_teamfiles = os.path.join(Path(custom_weights).parents[1], 'opt.yaml')
+            cfg_path_in_teamfiles = os.path.join(Path(custom_weights).parents[1], "opt.yaml")
             configs_local_path = self.download(cfg_path_in_teamfiles)
 
         self.device = select_device(device)
-        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
+        self.half = self.device.type != "cpu"  # half precision only supported on CUDA
         self.model = attempt_load(self.local_weights_path, map_location=device)  # load FP32 model
         try:
-            with open(configs_local_path, 'r') as stream:
+            with open(configs_local_path, "r") as stream:
                 cfgs_loaded = yaml.safe_load(stream)
         except:
             cfgs_loaded = None
 
-        if hasattr(self.model, 'module') and hasattr(self.model.module, 'img_size'):
+        if hasattr(self.model, "module") and hasattr(self.model.module, "img_size"):
             imgsz = self.model.module.img_size[0]
-        elif hasattr(self.model, 'img_size'):
+        elif hasattr(self.model, "img_size"):
             imgsz = self.model.img_size[0]
-        elif cfgs_loaded is not None and cfgs_loaded['img_size']:
-            imgsz = cfgs_loaded['img_size'][0]
+        elif cfgs_loaded is not None and cfgs_loaded["img_size"]:
+            imgsz = cfgs_loaded["img_size"][0]
         else:
             default_img_size = 640
-            sly.logger.warning(f"Image size is not found in model checkpoint. Use default: {default_img_size}")
+            sly.logger.warning(
+                f"Image size is not found in model checkpoint. Use default: {default_img_size}"
+            )
             imgsz = default_img_size
         self.stride = int(self.model.stride.max())  # model stride
         self.imgsz = check_img_size(imgsz, s=self.stride)  # check img_size
@@ -67,27 +73,36 @@ def load_on_device(
         if self.half:
             self.model.half()  # to FP16
 
-        if self.device.type != 'cpu':
+        if self.device.type != "cpu":
             self.model(
-                torch.zeros(1, 3, self.imgsz, self.imgsz).to(self.device).type_as(next(self.model.parameters()))
+                torch.zeros(1, 3, self.imgsz, self.imgsz)
+                .to(self.device)
+                .type_as(next(self.model.parameters()))
             )  # run once
 
-        self.class_names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
+
+        self.class_names = (
+            self.model.module.names if hasattr(self.model, "module") else self.model.names
+        )
 
         colors = None
-        if hasattr(self.model, 'module') and hasattr(self.model.module, 'colors'):
+        if hasattr(self.model, "module") and hasattr(self.model.module, "colors"):
            colors = self.model.module.colors
-        elif hasattr(self.model, 'colors'):
+        elif hasattr(self.model, "colors"):
            colors = self.model.colors
         else:
            colors = []
            for i in range(len(self.class_names)):
                colors.append(sly.color.generate_rgb(exist_colors=colors))
 
-        obj_classes = [sly.ObjClass(name, sly.Rectangle, color) for name, color in zip(self.class_names, colors)]
+        obj_classes = [
+            sly.ObjClass(name, sly.Rectangle, color)
+            for name, color in zip(self.class_names, colors)
+        ]
 
-        self._model_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(obj_classes),
-                                           tag_metas=sly.TagMetaCollection([self._get_confidence_tag_meta()]))
+        self._model_meta = sly.ProjectMeta(
+            obj_classes=sly.ObjClassCollection(obj_classes),
+            tag_metas=sly.TagMetaCollection([self._get_confidence_tag_meta()]),
+        )
 
         print(f"✅ Model has been successfully loaded on {device.upper()} device")
@@ -98,16 +113,16 @@ def get_info(self):
         info = super().get_info()
         info["model_name"] = "YOLOv5"
         info["checkpoint_name"] = pretrained_weights
-        info["pretrained_on_dataset"] = "COCO train 2017" if model_weights_options == "pretrained" else "custom"
+        info["pretrained_on_dataset"] = (
+            "COCO train 2017" if model_weights_options == "pretrained" else "custom"
+        )
         info["device"] = self.device.type
         info["sliding_window_support"] = self.sliding_window_mode
         info["half"] = str(self.half)
         info["input_size"] = self.imgsz
         return info
 
-    def predict(
-        self, image_path: str, settings: Dict[str, Any]
-    ) -> List[sly.nn.PredictionBBox]:
+    def predict(self, image_path: str, settings: Dict[str, Any]) -> List[sly.nn.PredictionBBox]:
 
         conf_thres = settings.get("conf_thres", self.custom_inference_settings_dict["conf_thres"])
         iou_thres = settings.get("iou_thres", self.custom_inference_settings_dict["iou_thres"])
@@ -129,8 +144,10 @@ def predict(
         inf_out = self.model(img, augment=augment)[0]
 
         # Apply NMS
-        output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=False)
-
+        output = non_max_suppression(
+            inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=settings["agnostic_nms"]
+        )
+
         predictions = []
@@ -138,13 +155,13 @@ def predict(
 
                 for *xyxy, conf, cls in reversed(det):
                     bbox = [int(xyxy[1]), int(xyxy[0]), int(xyxy[3]), int(xyxy[2])]
-                    predictions.append(sly.nn.PredictionBBox(self.class_names[int(cls)], bbox, conf.item()))
+                    predictions.append(
+                        sly.nn.PredictionBBox(self.class_names[int(cls)], bbox, conf.item())
+                    )
 
         return predictions
 
-    def predict_raw(
-        self, image_path: str, settings: Dict[str, Any]
-    ) -> List[sly.nn.PredictionBBox]:
+    def predict_raw(self, image_path: str, settings: Dict[str, Any]) -> List[sly.nn.PredictionBBox]:
 
         conf_thres = settings.get("conf_thres")
         augment = settings.get("augment")
@@ -165,11 +182,11 @@ def predict_raw(
         img = img.unsqueeze(0)
 
         inf_out = self.model(img, augment=augment)[0][0]
-
+
         inf_out[:, 5:] *= inf_out[:, 4:5]  # conf = obj_conf * cls_conf
 
         # Box (center x, center y, width, height) to (x1, y1, x2, y2)
         box = xywh2xyxy(inf_out[:, :4])
-        conf, j = inf_out[:, 5:].max(1, keepdim=True) # best class
+        conf, j = inf_out[:, 5:].max(1, keepdim=True)  # best class
         det = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
         det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
@@ -180,20 +197,24 @@ def predict_raw(
 
         return predictions
 
-sly.logger.info("Script arguments", extra={
-    "teamId": sly.env.team_id(),
-    "workspaceId": sly.env.workspace_id(),
-    "modal.state.modelWeightsOptions": model_weights_options,
-    "modal.state.modelSize": pretrained_weights,
-    "modal.state.weightsPath": custom_weights
-})
+
+sly.logger.info(
+    "Script arguments",
+    extra={
+        "teamId": sly.env.team_id(),
+        "workspaceId": sly.env.workspace_id(),
+        "modal.state.modelWeightsOptions": model_weights_options,
+        "modal.state.modelSize": pretrained_weights,
+        "modal.state.weightsPath": custom_weights,
+    },
+)
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print("Using device:", device)
 
 m = YOLOv5Model(
     custom_inference_settings=os.path.join(app_source_path, "custom_settings.yaml"),
-    sliding_window_mode = "advanced"
+    sliding_window_mode="advanced",
 )
 m.load_on_device(device=device)

From 9e56dfb17b4fa3f8c7a565bc881b7d52990e232d Mon Sep 17 00:00:00 2001
From: MaxTeselkin
Date: Thu, 27 Apr 2023 01:51:45 +0300
Subject: [PATCH 2/2] check if agnostic_nms in settings

---
 supervisely/serve/src/main.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/supervisely/serve/src/main.py b/supervisely/serve/src/main.py
index 26031821ffb8..920cd902678d 100644
--- a/supervisely/serve/src/main.py
+++ b/supervisely/serve/src/main.py
@@ -144,8 +144,12 @@ def predict(self, image_path: str, settings: Dict[str, Any]) -> List[sly.nn.Pred
         inf_out = self.model(img, augment=augment)[0]
 
         # Apply NMS
+        if "agnostic_nms" in settings:
+            is_agnostic = settings["agnostic_nms"]
+        else:
+            is_agnostic = False
         output = non_max_suppression(
-            inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=settings["agnostic_nms"]
+            inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=is_agnostic
         )
 
         predictions = []
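
Note (not part of the patches above): a minimal usage sketch of the new agnostic_nms option, assuming the inference-settings dict is parsed from custom_settings.yaml. The only details taken from the patches are the agnostic_nms key and the agnostic= keyword forwarded to YOLOv5's non_max_suppression; the numeric values below are illustrative assumptions, and dict.get with a default expresses the same fallback that [PATCH 2/2] adds with an explicit membership check.

    # Illustrative sketch only -- mirrors the fallback logic added in [PATCH 2/2].
    # "settings" stands in for the inference settings parsed from custom_settings.yaml.
    settings = {
        "conf_thres": 0.25,    # assumed example value, not taken from the patch
        "iou_thres": 0.45,
        "agnostic_nms": True,  # suppress overlapping boxes across classes, not per class
    }

    is_agnostic = settings.get("agnostic_nms", False)  # same effect as the added if/else block

    # The flag is then forwarded unchanged to YOLOv5's NMS call, as in the patched predict():
    # output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=is_agnostic)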