Update TorchScript suffix to *.torchscript #5856

Merged 1 commit on Dec 2, 2021
8 changes: 4 additions & 4 deletions detect.py
@@ -81,18 +81,18 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
     imgsz = check_img_size(imgsz, s=stride)  # check image size

     # Half
-    half &= (pt or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
-    if pt:
+    half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
+    if pt or jit:
         model.model.half() if half else model.model.float()

     # Dataloader
     if webcam:
         view_img = check_imshow()
         cudnn.benchmark = True  # set True to speed up constant image size inference
-        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
         bs = len(dataset)  # batch_size
     else:
-        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
         bs = 1  # batch_size
     vid_path, vid_writer = [None] * bs, [None] * bs

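Note on the change above: FP16 inference is now gated on the backend being PyTorch-based (eager *.pt or TorchScript *.torchscript) and the device being CUDA. A standalone sketch of that gate with illustrative values only (variable names mirror the diff; the device string is assumed):

    # Illustrative sketch of the detect.py half-precision gate (not project code)
    pt, jit, engine = False, True, False  # e.g. running a *.torchscript model
    device_type = 'cuda'                  # assumed device; 'cpu' would force FP32
    half = True                           # user requested half precision

    half &= (pt or jit or engine) and device_type != 'cpu'
    print(half)  # True here; would be False on CPU or for e.g. a *.tflite / *.mlmodel model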
6 changes: 3 additions & 3 deletions export.py
@@ -5,7 +5,7 @@
 Format | Example | Export `include=(...)` argument
 --- | --- | ---
 PyTorch | yolov5s.pt | -
-TorchScript | yolov5s.torchscript.pt | 'torchscript'
+TorchScript | yolov5s.torchscript | 'torchscript'
 ONNX | yolov5s.onnx | 'onnx'
 CoreML | yolov5s.mlmodel | 'coreml'
 TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model'
@@ -19,7 +19,7 @@

 Inference:
     $ python path/to/detect.py --weights yolov5s.pt
-                                         yolov5s.torchscript.pt
+                                         yolov5s.torchscript
                                          yolov5s.onnx
                                          yolov5s.mlmodel (under development)
                                          yolov5s_saved_model
@@ -66,7 +66,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:'
     # YOLOv5 TorchScript model export
     try:
         LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-        f = file.with_suffix('.torchscript.pt')
+        f = file.with_suffix('.torchscript')

         ts = torch.jit.trace(model, im, strict=False)
         d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
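As a quick check of the renamed artifact, the exported file can be loaded directly with torch.jit.load. A minimal sketch, assuming a yolov5s.torchscript file was produced beforehand (e.g. via export.py with the 'torchscript' include argument from the table above) and that a 1x3x640x640 input matches the traced shape:

    import torch

    # Load the TorchScript archive saved under the new *.torchscript suffix
    model = torch.jit.load('yolov5s.torchscript')  # assumed path from a prior export
    model.eval()
    im = torch.zeros(1, 3, 640, 640)  # dummy input; shape assumed to match the export
    with torch.no_grad():
        pred = model(im)  # traced forward pass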
12 changes: 6 additions & 6 deletions models/common.py
@@ -279,7 +279,7 @@ class DetectMultiBackend(nn.Module):
     def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
         # Usage:
         #   PyTorch:      weights = *.pt
-        #   TorchScript:            *.torchscript.pt
+        #   TorchScript:            *.torchscript
         #   CoreML:                 *.mlmodel
         #   TensorFlow:             *_saved_model
         #   TensorFlow:             *.pb
@@ -289,10 +289,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
         #   TensorRT:               *.engine
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
-        suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
+        suffix = Path(w).suffix.lower()
+        suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
         check_suffix(w, suffixes)  # check weights have acceptable suffix
-        pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)  # backend booleans
-        jit = pt and 'torchscript' in w.lower()
+        pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)  # backend booleans
         stride, names = 64, [f'class{i}' for i in range(1000)]  # assign defaults

         if jit:  # TorchScript
@@ -304,10 +304,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
             stride, names = int(d['stride']), d['names']
         elif pt:  # PyTorch
             from models.experimental import attempt_load  # scoped to avoid circular import
-            model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device)
+            model = attempt_load(weights, map_location=device)
             stride = int(model.stride.max())  # model stride
             names = model.module.names if hasattr(model, 'module') else model.names  # get class names
-        elif coreml:  # CoreML *.mlmodel
+        elif coreml:  # CoreML
             import coremltools as ct
             model = ct.models.MLModel(w)
         elif dnn:  # ONNX OpenCV DNN
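The backend selection above relies on positional unpacking: each entry in suffixes maps to exactly one boolean, so adding '.torchscript' gives TorchScript its own jit flag instead of piggybacking on pt and sniffing the filename. A minimal sketch of that pattern in isolation (hypothetical file names; the helper exists only for illustration):

    from pathlib import Path

    SUFFIXES = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
    NAMES = ['pt', 'jit', 'onnx', 'engine', 'tflite', 'pb', 'saved_model', 'coreml']

    def backend_flags(w):
        # Mirrors the generator-expression unpacking in DetectMultiBackend.__init__
        suffix = Path(w).suffix.lower()
        return dict(zip(NAMES, (suffix == s for s in SUFFIXES)))

    print(backend_flags('yolov5s.torchscript')['jit'])  # True
    print(backend_flags('yolov5s.pt')['pt'])            # True, and 'jit' is False for *.pt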
4 changes: 2 additions & 2 deletions utils/activations.py
@@ -18,8 +18,8 @@ def forward(x):
 class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
     @staticmethod
     def forward(x):
-        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
-        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for torchscript, CoreML and ONNX
+        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
+        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX


 # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
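Only the comment capitalization changes here; the math is untouched. The export-friendly form uses the identity hardswish(x) = x * hardtanh(x + 3, 0, 6) / 6, avoiding F.hardsigmoid for exporters that lack it. A small sketch verifying the two forms agree (assumes a PyTorch version that provides F.hardswish):

    import torch
    import torch.nn.functional as F

    x = torch.linspace(-5, 5, steps=101)
    export_friendly = x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # formulation kept in activations.py
    reference = F.hardswish(x)                               # native PyTorch Hardswish
    print(torch.allclose(export_friendly, reference))        # True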
10 changes: 5 additions & 5 deletions val.py
@@ -111,7 +111,7 @@ def run(data,
     # Initialize/load model and set device
     training = model is not None
     if training:  # called by train.py
-        device, pt, engine = next(model.parameters()).device, True, False  # get model device, PyTorch model
+        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model

         half &= device.type != 'cpu'  # half precision only supported on CUDA
         model.half() if half else model.float()
@@ -124,10 +124,10 @@ def run(data,

         # Load model
         model = DetectMultiBackend(weights, device=device, dnn=dnn)
-        stride, pt, engine = model.stride, model.pt, model.engine
+        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
         imgsz = check_img_size(imgsz, s=stride)  # check image size
-        half &= (pt or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
-        if pt:
+        half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
+        if pt or jit:
            model.model.half() if half else model.model.float()
        elif engine:
            batch_size = model.batch_size
@@ -166,7 +166,7 @@ def run(data,
     pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
-        if pt or engine:
+        if pt or jit or engine:
             im = im.to(device, non_blocking=True)
             targets = targets.to(device)
         im = im.half() if half else im.float()  # uint8 to fp16/32