Python refactor and simplification #12624

Merged
merged 2 commits on Jan 13, 2024
2 changes: 1 addition & 1 deletion export.py
@@ -546,7 +546,7 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
"--quantize_uint8" if int8 else "",
"--output_node_names=Identity,Identity_1,Identity_2,Identity_3",
str(f_pb),
str(f),
f,
]
subprocess.run([arg for arg in args if arg], check=True)

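The `str(f)` call was redundant because `f` is already built as a `str` earlier in `export_tfjs`; the surviving `[arg for arg in args if arg]` filter is what strips the optional flags that were set to empty strings. A minimal standalone sketch of that pattern, with hypothetical paths and `echo` standing in for the converter binary:

```python
# Sketch of the filter-falsy-args pattern (hypothetical paths; `echo`
# stands in for the real tensorflowjs_converter binary).
import subprocess

int8 = False
f_pb = "model.pb"  # hypothetical frozen-graph path
f = "model_web_model"  # already a str, so wrapping it in str() was a no-op

args = [
    "echo",
    "--quantize_uint8" if int8 else "",  # empty string when disabled
    f_pb,
    f,
]
subprocess.run([arg for arg in args if arg], check=True)  # drop falsy args
```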
16 changes: 11 additions & 5 deletions models/common.py
@@ -859,11 +859,17 @@ def pandas(self):
     def tolist(self):
         # return a list of Detections objects, i.e. 'for result in results.tolist():'
         r = range(self.n)  # iterable
-        x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
-        # for d in x:
-        #    for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
-        #        setattr(d, k, getattr(d, k)[0])  # pop out of list
-        return x
+        return [
+            Detections(
+                [self.ims[i]],
+                [self.pred[i]],
+                [self.files[i]],
+                self.times,
+                self.names,
+                self.s,
+            )
+            for i in r
+        ]
 
     def print(self):
         LOGGER.info(self.__str__())
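Behaviour is unchanged: the temporary `x` and the dead commented-out loop are gone, and the comprehension is returned directly. A toy sketch of the same build-then-return to direct-return refactor (`Batch` is a hypothetical stand-in for `Detections`):

```python
# Toy stand-in for Detections to show the direct-return refactor.
class Batch:
    def __init__(self, images, preds):
        self.images, self.preds = images, preds
        self.n = len(images)

    def tolist(self):
        # One single-element Batch per input image, returned directly
        # instead of assigning to a temporary list first.
        return [Batch([self.images[i]], [self.preds[i]]) for i in range(self.n)]

batches = Batch(["a.jpg", "b.jpg"], [0.9, 0.8]).tolist()
print(len(batches), batches[0].images)  # 2 ['a.jpg']
```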
32 changes: 11 additions & 21 deletions train.py
@@ -682,10 +682,7 @@ def main(opt, callbacks=Callbacks()):
         )
 
         # Delete the items in meta dictionary whose first value is False
-        del_ = []
-        for item in meta.keys():
-            if meta[item][0] is False:
-                del_.append(item)
+        del_ = [item for item, value_ in meta.items() if value_[0] is False]
         hyp_GA = hyp.copy()  # Make a copy of hyp dictionary
         for item in del_:
             del meta[item]  # Remove the item from meta dictionary
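The four-line accumulate loop becomes one comprehension over `meta.items()`; deletion still happens afterwards, so the dict is never mutated mid-iteration. A self-contained sketch (the `(enabled, lower, upper)` tuple layout of `meta` is inferred from the bounds indexing in the next hunk, and the `hyp_GA` pruning is an assumption about the elided lines):

```python
# The deletion-list comprehension in isolation, on made-up hyperparameters.
meta = {"lr0": (True, 1e-5, 1e-1), "mosaic": (False, 0.0, 1.0)}
hyp = {"lr0": 0.01, "mosaic": 1.0}

del_ = [item for item, value_ in meta.items() if value_[0] is False]
hyp_GA = hyp.copy()
for item in del_:
    del meta[item]
    del hyp_GA[item]  # assumption: the hidden lines prune hyp_GA the same way

print(meta)    # {'lr0': (True, 1e-05, 0.1)}
print(hyp_GA)  # {'lr0': 0.01}
```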
@@ -696,9 +693,7 @@ def main(opt, callbacks=Callbacks()):
         upper_limit = np.array([meta[k][2] for k in hyp_GA.keys()])
 
         # Create gene_ranges list to hold the range of values for each gene in the population
-        gene_ranges = []
-        for i in range(len(upper_limit)):
-            gene_ranges.append((lower_limit[i], upper_limit[i]))
+        gene_ranges = [(lower_limit[i], upper_limit[i]) for i in range(len(upper_limit))]
 
         # Initialize the population with initial_values or random values
         initial_values = []
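The bounds pairing collapses the same way. For reference, `zip` gives an equivalent and arguably cleaner form; a quick equivalence check under made-up bounds:

```python
# Equivalent ways to pair per-gene bounds; zip() avoids indexing entirely.
import numpy as np

lower_limit = np.array([0.0, 0.1, 0.2])
upper_limit = np.array([1.0, 0.9, 0.8])

gene_ranges = [(lower_limit[i], upper_limit[i]) for i in range(len(upper_limit))]
assert gene_ranges == list(zip(lower_limit, upper_limit))
```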

         # Generate random values within the search space for the rest of the population
         if initial_values is None:
-            population = [generate_individual(gene_ranges, len(hyp_GA)) for i in range(pop_size)]
-        else:
-            if pop_size > 1:
-                population = [
-                    generate_individual(gene_ranges, len(hyp_GA)) for i in range(pop_size - len(initial_values))
-                ]
-                for initial_value in initial_values:
-                    population = [initial_value] + population
+            population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size)]
+        elif pop_size > 1:
+            population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size - len(initial_values))]
+            for initial_value in initial_values:
+                population = [initial_value] + population
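Flattening `else: if ...` into `elif` and renaming the unused index to `_` keeps the seeding semantics: random individuals fill the remainder, then the seeds are prepended. A runnable sketch with a stub `generate_individual` (the real one is defined elsewhere in train.py; this stub just samples uniformly inside each gene range):

```python
# Standalone sketch of the population-seeding logic.
import random

def generate_individual(gene_ranges, n_genes):
    return [random.uniform(lo, hi) for lo, hi in gene_ranges[:n_genes]]

gene_ranges = [(0.0, 1.0)] * 3
pop_size = 5
initial_values = [[0.5, 0.5, 0.5]]

if initial_values is None:
    population = [generate_individual(gene_ranges, 3) for _ in range(pop_size)]
elif pop_size > 1:
    population = [generate_individual(gene_ranges, 3) for _ in range(pop_size - len(initial_values))]
    for initial_value in initial_values:
        population = [initial_value] + population

print(len(population), population[0])  # 5 [0.5, 0.5, 0.5] -> seeds lead
```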

         # Run the genetic algorithm for a fixed number of generations
         list_keys = list(hyp_GA.keys())
         for generation in range(opt.evolve):
             if generation >= 1:
                 save_dict = {}
                 for i in range(len(population)):
-                    little_dict = {}
-                    for j in range(len(population[i])):
-                        little_dict[list_keys[j]] = float(population[i][j])
-                    save_dict["gen" + str(generation) + "number" + str(i)] = little_dict
+                    little_dict = {list_keys[j]: float(population[i][j]) for j in range(len(population[i]))}
+                    save_dict[f"gen{str(generation)}number{str(i)}"] = little_dict
 
                 with open(save_dir / "evolve_population.yaml", "w") as outfile:
                     yaml.dump(save_dict, outfile, default_flow_style=False)
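The three-line accumulator becomes a dict comprehension, and string concatenation becomes an f-string. A toy run of the new form (keys and values are made up; note that f-strings stringify interpolated values themselves, so the inner `str()` calls kept in the merged code are redundant and omitted here):

```python
# The collapsed inner loop on toy data.
list_keys = ["lr0", "momentum"]
population = [[0.01, 0.9], [0.02, 0.8]]
generation = 1

save_dict = {}
for i in range(len(population)):
    little_dict = {list_keys[j]: float(population[i][j]) for j in range(len(population[i]))}
    save_dict[f"gen{generation}number{i}"] = little_dict

print(save_dict)
# {'gen1number0': {'lr0': 0.01, 'momentum': 0.9}, 'gen1number1': {'lr0': 0.02, 'momentum': 0.8}}
```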
@@ -771,7 +761,7 @@ def main(opt, callbacks=Callbacks()):

             # Select the fittest individuals for reproduction using adaptive tournament selection
             selected_indices = []
-            for i in range(pop_size - elite_size):
+            for _ in range(pop_size - elite_size):
                 # Adaptive tournament size
                 tournament_size = max(
                     max(2, tournament_size_min),
@@ -788,7 +778,7 @@ def main(opt, callbacks=Callbacks()):
             selected_indices.extend(elite_indices)
             # Create the next generation through crossover and mutation
             next_generation = []
-            for i in range(pop_size):
+            for _ in range(pop_size):
                 parent1_index = selected_indices[random.randint(0, pop_size - 1)]
                 parent2_index = selected_indices[random.randint(0, pop_size - 1)]
                 # Adaptive crossover rate
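Both loops here iterate a fixed number of times without ever reading the counter, hence the rename. A two-line illustration of the convention:

```python
# "_" is the conventional name for a loop variable that is never read;
# linters then stop flagging it as unused.
import random

population = [[random.random() for _ in range(3)] for _ in range(5)]
offspring = [random.choice(population) for _ in range(5)]  # index never used
print(len(offspring))
```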
4 changes: 1 addition & 3 deletions utils/downloads.py
@@ -24,9 +24,7 @@ def is_url(url, check=True):
 def gsutil_getsize(url=""):
     # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
     output = subprocess.check_output(["gsutil", "du", url], shell=True, encoding="utf-8")
-    if output:
-        return int(output.split()[0])
-    return 0
+    return int(output.split()[0]) if output else 0
 
 
 def url_getsize(url="https://ultralytics.com/images/bus.jpg"):
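The early-return pair folds into one conditional expression. A sketch of the same logic isolated from gsutil, exercised against the "size then name" output shape that `gsutil du` prints (the sample output below is invented):

```python
# The conditional-expression return on its own.
def getsize_from_du(output: str) -> int:
    return int(output.split()[0]) if output else 0

assert getsize_from_du("1576 gs://bucket/file") == 1576
assert getsize_from_du("") == 0  # no output -> size 0
```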
2 changes: 1 addition & 1 deletion utils/general.py
@@ -188,7 +188,7 @@ class Profile(contextlib.ContextDecorator):
     def __init__(self, t=0.0, device: torch.device = None):
         self.t = t
         self.device = device
-        self.cuda = True if (device and str(device)[:4] == "cuda") else False
+        self.cuda = bool(device and str(device).startswith("cuda"))
 
     def __enter__(self):
         self.start = self.time()
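Two idioms in one line: `bool(x)` replaces the redundant `True if x else False` ternary, and `startswith` replaces the `[:4]` slice. A quick equivalence check, with plain strings standing in for `torch.device`:

```python
# "True if cond else False" is a no-op wrapper around bool(cond), and
# startswith() reads better than slicing for prefix checks.
for device in ["cuda:0", "cpu", "", None]:
    old = True if (device and str(device)[:4] == "cuda") else False
    new = bool(device and str(device).startswith("cuda"))
    assert old == new
```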
25 changes: 11 additions & 14 deletions utils/loggers/__init__.py
@@ -67,9 +67,7 @@ def _json_default(value):
         value = value.item()
     except ValueError:  # "only one element tensors can be converted to Python scalars"
         pass
-    if isinstance(value, float):
-        return value
-    return str(value)
+    return value if isinstance(value, float) else str(value)
 
 
 class Loggers:
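`_json_default` is the `default=` hook for `json.dumps`: floats (such as tensors unwrapped by `.item()` above) pass through, anything else is stringified. A minimal behaviour sketch with the tensor branch omitted:

```python
# How the simplified fallback behaves when json.dumps hits a value it
# cannot serialize natively.
import json

def _json_default(value):
    return value if isinstance(value, float) else str(value)

payload = {"lr": 0.01, "device": object()}  # object() is not JSON-serializable
print(json.dumps(payload, default=_json_default))
```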
@@ -250,12 +248,12 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
                 f.write(s + ("%20.5g," * n % tuple([epoch] + vals)).rstrip(",") + "\n")
         if self.ndjson_console or self.ndjson_file:
             json_data = json.dumps(dict(epoch=epoch, **x), default=_json_default)
-        if self.ndjson_console:
-            print(json_data)
-        if self.ndjson_file:
-            file = self.save_dir / "results.ndjson"
-            with open(file, "a") as f:
-                print(json_data, file=f)
+            if self.ndjson_console:
+                print(json_data)
+            if self.ndjson_file:
+                file = self.save_dir / "results.ndjson"
+                with open(file, "a") as f:
+                    print(json_data, file=f)
 
         if self.tb:
             for k, v in x.items():
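The change is pure indentation: the console and file writes move inside the guard that computes `json_data`, so the name is visibly defined on every path that uses it (the old flat layout was safe only because the flags overlapped). A standalone sketch with hypothetical flags and payload:

```python
# Scoping sketch of the re-indented ndjson block.
import json

ndjson_console, ndjson_file = True, False
epoch, x = 3, {"train/box_loss": 0.05}

if ndjson_console or ndjson_file:
    json_data = json.dumps(dict(epoch=epoch, **x))
    if ndjson_console:
        print(json_data)
    if ndjson_file:
        with open("results.ndjson", "a") as f:
            print(json_data, file=f)
```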
@@ -370,10 +368,7 @@ def __init__(self, opt, console_logger, include=("tb", "wandb", "clearml")):
         if clearml and "clearml" in self.include:
             try:
                 # Hyp is not available in classification mode
-                if "hyp" not in opt:
-                    hyp = {}
-                else:
-                    hyp = opt.hyp
+                hyp = {} if "hyp" not in opt else opt.hyp
                 self.clearml = ClearmlLogger(opt, hyp)
             except Exception:
                 self.clearml = None
@@ -427,7 +422,9 @@ def log_graph(self, model, imgsz=(640, 640)):
         if self.tb:
             log_tensorboard_graph(self.tb, model, imgsz)
 
-    def log_model(self, model_path, epoch=0, metadata={}):
+    def log_model(self, model_path, epoch=0, metadata=None):
+        if metadata is None:
+            metadata = {}
         # Log model to all loggers
         if self.wandb:
             art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata)
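The `metadata={}` change fixes the classic mutable-default pitfall rather than just style: a default value is evaluated once at function definition, so a mutable default is silently shared between calls. A demonstration (the function names are illustrative, not the logger's API):

```python
# Why metadata={} had to go.
def log_model_bad(metadata={}):
    metadata["calls"] = metadata.get("calls", 0) + 1
    return metadata

def log_model_good(metadata=None):
    if metadata is None:
        metadata = {}  # fresh dict per call
    metadata["calls"] = metadata.get("calls", 0) + 1
    return metadata

assert log_model_bad() is log_model_bad()        # same shared dict
assert log_model_good() is not log_model_good()  # independent dicts
print(log_model_bad())  # {'calls': 3} -- state leaked across three calls
```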
50 changes: 26 additions & 24 deletions utils/loggers/clearml/clearml_utils.py
@@ -31,7 +31,7 @@ def construct_dataset(clearml_info_string):
"More than one yaml file was found in the dataset root, cannot determine which one contains "
"the dataset definition this way."
)
elif len(yaml_filenames) == 0:
elif not yaml_filenames:
raise ValueError(
"No yaml definition found in dataset root path, check that there is a correct yaml file "
"inside the dataset root path."
{"train", "test", "val", "nc", "names"}
), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"

data_dict = dict()
data_dict = {}
data_dict["train"] = (
str((dataset_root_path / dataset_definition["train"]).resolve()) if dataset_definition["train"] else None
)
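Both edits in this file so far are drop-in equivalences; a quick check:

```python
# Empty-container truthiness replaces an explicit len() == 0 test, and the
# {} literal replaces dict().
yaml_filenames = []
assert (len(yaml_filenames) == 0) == (not yaml_filenames)

data_dict = {}  # literal form; same semantics as dict()
assert data_dict == dict()
```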
@@ -96,7 +96,7 @@ def __init__(self, opt, hyp):
         self.data_dict = None
         if self.clearml:
             self.task = Task.init(
-                project_name=opt.project if not str(opt.project).startswith("runs/") else "YOLOv5",
+                project_name="YOLOv5" if str(opt.project).startswith("runs/") else opt.project,
                 task_name=opt.name if opt.name != "exp" else "Training",
                 tags=["YOLOv5"],
                 output_uri=True,
@@ -202,24 +202,26 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres
         class_names (dict): dict containing mapping of class int to class name
         image (Tensor): A torch tensor containing the actual image data
         """
-        if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
-            # Log every bbox_interval times and deduplicate for any intermittent extra eval runs
-            if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
-                im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
-                annotator = Annotator(im=im, pil=True)
-                for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
-                    color = colors(i)
-
-                    class_name = class_names[int(class_nr)]
-                    confidence_percentage = round(float(conf) * 100, 2)
-                    label = f"{class_name}: {confidence_percentage}%"
-
-                    if conf > conf_threshold:
-                        annotator.rectangle(box.cpu().numpy(), outline=color)
-                        annotator.box_label(box.cpu().numpy(), label=label, color=color)
-
-                annotated_image = annotator.result()
-                self.task.get_logger().report_image(
-                    title="Bounding Boxes", series=image_path.name, iteration=self.current_epoch, image=annotated_image
-                )
-                self.current_epoch_logged_images.add(image_path)
+        if (
+            len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch
+            and self.current_epoch >= 0
+            and (self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images)
+        ):
+            im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
+            annotator = Annotator(im=im, pil=True)
+            for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
+                color = colors(i)
+
+                class_name = class_names[int(class_nr)]
+                confidence_percentage = round(float(conf) * 100, 2)
+                label = f"{class_name}: {confidence_percentage}%"
+
+                if conf > conf_threshold:
+                    annotator.rectangle(box.cpu().numpy(), outline=color)
+                    annotator.box_label(box.cpu().numpy(), label=label, color=color)
+
+            annotated_image = annotator.result()
+            self.task.get_logger().report_image(
+                title="Bounding Boxes", series=image_path.name, iteration=self.current_epoch, image=annotated_image
+            )
+            self.current_epoch_logged_images.add(image_path)
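Merging the outer and inner `if` drops one indentation level from a long body without changing when it runs; the parenthesized clause preserves the original inner grouping. A minimal sketch of the shape of the change, with stand-in predicates for the epoch and interval checks:

```python
# Two nested guards collapse into one multi-line condition.
a, b, c, d = True, True, True, False

hit_before = False
if a and b:          # old outer if
    if c and d:      # old inner if
        hit_before = True

hit_after = a and b and (c and d)  # new combined condition
assert hit_before == hit_after
```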