diff --git a/src/axolotl/cli/__init__.py b/src/axolotl/cli/__init__.py
index a68377f07..15da78b09 100644
--- a/src/axolotl/cli/__init__.py
+++ b/src/axolotl/cli/__init__.py
@@ -410,7 +410,10 @@ def ultra_apply_chatml(sample):  # pylint: disable=possibly-unused-variable
     for i, data_set in enumerate(train_datasets):
         _type = cfg.datasets[i]["type"]
         ds_type_fn = locals()[_type]
-        train_datasets[i] = data_set.map(ds_type_fn)
+        train_datasets[i] = data_set.map(
+            ds_type_fn,
+            desc="Mapping RL Dataset",
+        )
     train_dataset = concatenate_datasets(train_datasets)
 
     # eval_dataset = eval_dataset.map(intel_apply_chatml)
diff --git a/src/axolotl/datasets.py b/src/axolotl/datasets.py
index 837b0d674..1f04889c2 100644
--- a/src/axolotl/datasets.py
+++ b/src/axolotl/datasets.py
@@ -57,6 +57,7 @@ def process(self, dataset):
             num_proc=num_proc,
             remove_columns=features,
             keep_in_memory=self.keep_in_memory,
+            desc="Tokenizing Prompts",
             **map_kwargs,
         )
diff --git a/src/axolotl/utils/data.py b/src/axolotl/utils/data.py
index 3691a6e14..5c4cd148b 100644
--- a/src/axolotl/utils/data.py
+++ b/src/axolotl/utils/data.py
@@ -792,6 +792,7 @@ def load_pretraining_dataset(path, tokenizer, cfg, name=None, max_tokens=2048, s
         # remove all the existing columns after mapping since they end up having
         # a different length than the encoded/tokenized column
         remove_columns=dataset.features.keys(),
+        desc="Encoding Pretraining",
     )
     return dataset
diff --git a/src/axolotl/utils/trainer.py b/src/axolotl/utils/trainer.py
index 2dec90eb7..dfd3385b7 100644
--- a/src/axolotl/utils/trainer.py
+++ b/src/axolotl/utils/trainer.py
@@ -134,12 +134,14 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
         drop_long,
         num_proc=cfg.dataset_processes,
         load_from_cache_file=not cfg.is_preprocess,
+        desc="Dropping Long Sequences",
     )
     if eval_dataset:
         eval_dataset = eval_dataset.filter(
             drop_long,
             num_proc=cfg.dataset_processes,
             load_from_cache_file=not cfg.is_preprocess,
+            desc="Dropping Long Sequences",
         )
 
     if cfg.group_by_length:
@@ -147,6 +149,7 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
             add_length,
             num_proc=cfg.dataset_processes,
             load_from_cache_file=not cfg.is_preprocess,
+            desc="Group By Length",
         )
 
     if cfg.sample_packing:
@@ -154,6 +157,7 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
             add_position_ids,
             num_proc=cfg.dataset_processes,
             load_from_cache_file=not cfg.is_preprocess,
+            desc="Add position_id column (Sample Packing)",
         )
         if cfg.eval_sample_packing is not False:
             if eval_dataset:
@@ -161,6 +165,7 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
                     add_position_ids,
                     num_proc=cfg.dataset_processes,
                     load_from_cache_file=not cfg.is_preprocess,
+                    desc="Add position_id column (Sample Packing)",
                 )
 
     return train_dataset, eval_dataset
@@ -169,9 +174,13 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
 def process_pretraining_datasets_for_packing(train_dataset, sequence_len):
     drop_long = partial(drop_long_seq, sequence_len=sequence_len)
 
-    train_dataset = train_dataset.filter(drop_long)
+    train_dataset = train_dataset.filter(
+        drop_long,
+        desc="Dropping Long Sequences",
+    )
     train_dataset = train_dataset.map(
         add_position_ids,
+        desc="Add position_id column (Pretraining Sample Packing)",
     )
     return train_dataset
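
Note (not part of the diff): the strings added above are passed as the `desc` argument that `datasets.Dataset.map` and `datasets.Dataset.filter` already accept; they only set the label shown next to the tqdm progress bar while the step runs, and do not change caching or results. A minimal standalone sketch of the effect, using a throwaway dataset rather than axolotl's pipeline:

# Standalone sketch: `desc` becomes the progress-bar prefix for map/filter.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello world", "axolotl"] * 8})

# Progress bar reads e.g. "Tokenizing Prompts: 100%|##########| 16/16 ..."
ds = ds.map(lambda ex: {"n_chars": len(ex["text"])}, desc="Tokenizing Prompts")

# Progress bar reads e.g. "Dropping Long Sequences: 100%|##########| 16/16 ..."
ds = ds.filter(lambda ex: ex["n_chars"] > 5, desc="Dropping Long Sequences")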