From fb6bd6a48305686bb09fed153fdbf8373a0907cb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Mar 2022 14:07:20 +0100 Subject: [PATCH] Update bytes to GB with bitshift --- utils/__init__.py | 7 +++---- utils/autobatch.py | 7 ++++--- utils/general.py | 5 +++-- utils/torch_utils.py | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index 4658ed6473cd..a63c473a4340 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -21,14 +21,13 @@ def notebook_init(verbose=True): if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + # System info if verbose: - # System info - # gb = 1 / 1000 ** 3 # bytes to GB - gib = 1 / 1024 ** 3 # bytes to GiB + gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total total, used, free = shutil.disk_usage("/") display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)' + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: s = '' diff --git a/utils/autobatch.py b/utils/autobatch.py index cb94f041e95d..e53b4787b87d 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -34,11 +34,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size + gb = 1 << 30 # bytes to GiB (1024 ** 3) d = str(device).upper() # 'CUDA:0' properties = torch.cuda.get_device_properties(device) # device properties - t = properties.total_memory / 1024 ** 3 # (GiB) - r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) - a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) + t = properties.total_memory / gb # (GiB) + r = torch.cuda.memory_reserved(device) / gb # (GiB) + a = torch.cuda.memory_allocated(device) / gb # (GiB) f = t - (r + a) # free inside reserved 
LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') diff --git a/utils/general.py b/utils/general.py index d1594a8b5cea..36c180fe4cf2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -223,11 +223,12 @@ def emojis(str=''): def file_size(path): # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) path = Path(path) if path.is_file(): - return path.stat().st_size / 1E6 + return path.stat().st_size / mb elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb else: return 0.0 diff --git a/utils/torch_utils.py b/utils/torch_utils.py index c11d2a4269ef..2e6fba06626a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -86,7 +86,7 @@ def select_device(device='', batch_size=0, newline=True): space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MiB (1024 ** 2) else: s += 'CPU\n'