diff --git a/hubconf.py b/hubconf.py
index 33fc87930582..bffe2d588b4f 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -47,7 +47,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
         model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model
         if autoshape:
             if model.pt and isinstance(model.model, ClassificationModel):
-                LOGGER.warning('WARNING: YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. '
+                LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. '
                                'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
             else:
                 model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
diff --git a/utils/autobatch.py b/utils/autobatch.py
index 01152055196d..641b055b9fe3 100644
--- a/utils/autobatch.py
+++ b/utils/autobatch.py
@@ -18,7 +18,7 @@ def check_train_batch_size(model, imgsz=640, amp=True):
         return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size
 
 
-def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
+def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
     # Automatically estimate best batch size to use `fraction` of available CUDA memory
     # Usage:
     #     import torch