llama : use Q4_K for attn_v for Q2_K_S when n_gqa >= 4 (ggerganov#4996)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
2 people authored and hodlen committed Apr 1, 2024
1 parent d07de0a · commit d13fd96
Showing 1 changed file (llama.cpp) with 6 additions and 1 deletion.
@@ -8477,7 +8477,12 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
         }
         else if (name == "token_embd.weight") new_type = GGML_TYPE_Q2_K;
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
             new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
         }
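Note: the heuristic in this diff keys off the grouped-query-attention ratio. Below is a minimal standalone C++ sketch of the idea, not the llama.cpp implementation; the struct and field names are assumptions that only mirror how n_gqa() (query heads divided by key/value heads) is used in the check above. With GQA (4 or more query heads per KV head) the attn_v tensor is comparatively small, so promoting it to Q4_K costs little file size.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the model hyperparameters; names are assumptions.
struct hparams {
    uint32_t n_head    = 32;  // query heads
    uint32_t n_head_kv = 8;   // key/value heads (GQA groups)
    uint32_t n_gqa() const { return n_head / n_head_kv; }
};

int main() {
    hparams hp;  // e.g. a 4:1 grouped-query-attention configuration
    // Same threshold as the diff: GQA models get Q4_K for attn_v,
    // while non-GQA models (n_gqa == 1) keep the previous Q3_K choice.
    const char * attn_v_type = hp.n_gqa() >= 4 ? "Q4_K" : "Q3_K";
    std::printf("n_gqa = %u -> attn_v quantized as %s\n",
                (unsigned) hp.n_gqa(), attn_v_type);
    return 0;
}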
