
fix: torch_dtype mistral default to fp32
NanoCode012 committed Jan 5, 2024
1 parent 63fb3eb commit fec2590
Showing 1 changed file with 4 additions and 1 deletion.
src/axolotl/utils/models.py: 4 additions & 1 deletion
@@ -561,7 +561,10 @@ def load_model(

     # LlamaRMSNorm layers are in fp32 after kbit_training or full finetune, so we need to
     # convert them back to fp16/bf16 for flash-attn compatibility.
-    if needs_fa2_dtype or (cfg.flash_attention and cfg.is_llama_derived_model):
+    if needs_fa2_dtype or (
+        cfg.flash_attention
+        and (cfg.is_llama_derived_model or cfg.is_mistral_derived_model)
+    ):
         LOG.info("converting modules to %s for flash attention", cfg.torch_dtype)
         for name, module in model.named_modules():
             if "norm" in name:
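The loop this hunk guards casts every module whose name contains "norm" to cfg.torch_dtype, and the change simply extends that path to Mistral-derived models. Below is a minimal standalone sketch of that pattern, not the axolotl source; `model` and `target_dtype` are hypothetical stand-ins for the loaded transformers model and cfg.torch_dtype.

import torch

def convert_norms_for_flash_attn(model: torch.nn.Module, target_dtype: torch.dtype) -> None:
    # After k-bit training prep or full finetuning, RMSNorm layers (e.g.
    # LlamaRMSNorm / MistralRMSNorm) end up in fp32; cast them back to the
    # configured fp16/bf16 dtype so flash-attn sees consistent dtypes.
    for name, module in model.named_modules():
        if "norm" in name:
            # nn.Module.to() converts the module's parameters and buffers in place
            module.to(target_dtype)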
