diff --git a/src/sparseml/modifiers/obcq/utils/helpers.py b/src/sparseml/modifiers/obcq/utils/helpers.py
index 455e37986d8..f59a368bec2 100644
--- a/src/sparseml/modifiers/obcq/utils/helpers.py
+++ b/src/sparseml/modifiers/obcq/utils/helpers.py
@@ -158,10 +158,10 @@ def ppl_eval_general(
         vocabulary_size = logits[0].shape[-1]
         logits = [logit[:, :-1, :].view(-1, vocabulary_size) for logit in logits]
-        logits = torch.concatenate(logits, dim=0).contiguous().to(torch.float32)
+        logits = torch.cat(logits, dim=0).contiguous().to(torch.float32)

         labels = [sample[:, 1:].view(-1) for sample in samples]
-        labels = torch.concatenate(labels, dim=0).to(dev)
+        labels = torch.cat(labels, dim=0).to(dev)

         neg_log_likelihood += torch.nn.functional.cross_entropy(
             logits,
             labels,
diff --git a/src/sparseml/transformers/data/base_llm.py b/src/sparseml/transformers/data/base_llm.py
index 61c0b362087..3226bff9778 100644
--- a/src/sparseml/transformers/data/base_llm.py
+++ b/src/sparseml/transformers/data/base_llm.py
@@ -100,7 +100,7 @@ def _add_end_token(self, tokenized_sample):
        if len(tokenized_sample) == self._seqlen:
            tokenized_sample[-1] = self.tokenizer.eos_token_id
        else:
-            tokenized_sample = torch.concatenate(
+            tokenized_sample = torch.cat(
                (
                    tokenized_sample,
                    torch.tensor((self.tokenizer.eos_token_id,)),
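
Note (not part of the patch): torch.concatenate is a NumPy-style alias of torch.cat that was only added in a later PyTorch release, so standardizing on torch.cat keeps the code compatible with older PyTorch versions while producing identical results. A minimal sketch of the two call sites the diff touches; the tensor shapes and eos_token_id below are made up for illustration:

import torch

# Flatten per-sample logits and concatenate along dim 0, as in
# ppl_eval_general (3 samples, sequence length 8 -> 7 shifted positions,
# vocabulary size 32 are illustrative numbers):
logits = [torch.randn(1, 8, 32)[:, :-1, :].reshape(-1, 32) for _ in range(3)]
logits = torch.cat(logits, dim=0).contiguous().to(torch.float32)
assert logits.shape == (21, 32)

# Append an EOS id to a 1-D token tensor, as in _add_end_token:
eos_token_id = 2  # hypothetical id
tokenized_sample = torch.tensor([5, 9, 14])
tokenized_sample = torch.cat((tokenized_sample, torch.tensor((eos_token_id,))))
assert tokenized_sample.tolist() == [5, 9, 14, 2]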