From aaf1223d38c6978fee389cce5b3bfe3ecf9df88a Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 6 Nov 2021 13:11:36 +0100
Subject: [PATCH 1/3] Update autobatch.py

---
 utils/autobatch.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/utils/autobatch.py b/utils/autobatch.py
index 1632e9bc6a5a..9dffd2fb85a9 100644
--- a/utils/autobatch.py
+++ b/utils/autobatch.py
@@ -35,11 +35,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
         return batch_size
 
     d = str(device).upper()  # 'CUDA:0'
-    t = torch.cuda.get_device_properties(device).total_memory / 1024 ** 3  # (GB)
-    r = torch.cuda.memory_reserved(device) / 1024 ** 3  # (GB)
-    a = torch.cuda.memory_allocated(device) / 1024 ** 3  # (GB)
+    properties = torch.cuda.get_device_properties(device)  # device properties
+    t = properties.total_memory / 1024 ** 3  # (GiB)
+    r = torch.cuda.memory_reserved(device) / 1024 ** 3  # (GiB)
+    a = torch.cuda.memory_allocated(device) / 1024 ** 3  # (GiB)
     f = t - (r + a)  # free inside reserved
-    print(f'{prefix}{d} {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free')
+    print(f'{prefix}{d} {properties.name} {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free')
 
     batch_sizes = [1, 2, 4, 8, 16]
     try:

From 047094a50d22bbf60723fbeed998905bf3ee489c Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 6 Nov 2021 13:17:34 +0100
Subject: [PATCH 2/3] Update autobatch.py

---
 utils/autobatch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/autobatch.py b/utils/autobatch.py
index 9dffd2fb85a9..d557e29d156e 100644
--- a/utils/autobatch.py
+++ b/utils/autobatch.py
@@ -40,7 +40,7 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
     r = torch.cuda.memory_reserved(device) / 1024 ** 3  # (GiB)
     a = torch.cuda.memory_allocated(device) / 1024 ** 3  # (GiB)
     f = t - (r + a)  # free inside reserved
-    print(f'{prefix}{d} {properties.name} {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free')
+    print(f'{prefix}{d} ({properties.name}) {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free')
 
     batch_sizes = [1, 2, 4, 8, 16]
     try:

From 49e79e9e06e6c0faf99dd83d790ae4414705827b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 6 Nov 2021 13:28:46 +0100
Subject: [PATCH 3/3] Update autobatch.py

---
 utils/autobatch.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/utils/autobatch.py b/utils/autobatch.py
index d557e29d156e..3f2b4d1a4c38 100644
--- a/utils/autobatch.py
+++ b/utils/autobatch.py
@@ -40,7 +40,7 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
     r = torch.cuda.memory_reserved(device) / 1024 ** 3  # (GiB)
     a = torch.cuda.memory_allocated(device) / 1024 ** 3  # (GiB)
     f = t - (r + a)  # free inside reserved
-    print(f'{prefix}{d} ({properties.name}) {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free')
+    print(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
 
     batch_sizes = [1, 2, 4, 8, 16]
     try:
@@ -53,5 +53,5 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
     batch_sizes = batch_sizes[:len(y)]
     p = np.polyfit(batch_sizes, y, deg=1)  # first degree polynomial fit
     b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
-    print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)')
+    print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)')
     return b