more sane defaults for openllama 3b used for quickstarts #602

Merged Sep 19, 2023 (3 commits)
Changes from all commits
20 changes: 10 additions & 10 deletions examples/openllama-3b/config.yml
@@ -1,5 +1,5 @@
-base_model: openlm-research/open_llama_3b
-base_model_config: openlm-research/open_llama_3b
+base_model: openlm-research/open_llama_3b_v2
+base_model_config: openlm-research/open_llama_3b_v2
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 load_in_8bit: false
@@ -13,8 +13,8 @@ dataset_prepared_path: last_run_prepared
 val_set_size: 0.02
 adapter:
 lora_model_dir:
-sequence_len: 256
-max_packed_sequence_len:
+sequence_len: 1024
+sample_packing: true
 lora_r:
 lora_alpha:
 lora_dropout:
@@ -29,11 +29,11 @@ wandb_log_model:
 output_dir: ./openllama-out
 gradient_accumulation_steps: 1
 micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 torchdistx_path:
 lr_scheduler: cosine
-learning_rate: 0.00001
+learning_rate: 0.000003
 train_on_inputs: false
 group_by_length: false
 float16: true
@@ -45,12 +45,12 @@ early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
 logging_steps: 1
-xformers_attention: true
-flash_attention:
+xformers_attention:
+flash_attention: true
 gptq_groupsize:
 gptq_model_v1:
-warmup_steps: 10
-eval_steps: 50
+warmup_steps: 20
+eval_steps: 0.05
 save_steps:
 debug:
 deepspeed:
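Taken together, the full-finetune config now targets open_llama_3b_v2 with flash attention, a 1024-token context with sample packing in place of the old 256-token window, a lower learning rate, and a fractional eval_steps. Below is a minimal sketch of the idea behind sample_packing, assuming greedy first-fit packing of tokenized sample lengths; axolotl's real packing logic lives in its dataloader, and the pack_samples helper here is hypothetical.

```python
# Minimal sketch of the idea behind `sample_packing: true` (not axolotl's
# actual implementation): greedily pack variable-length tokenized samples
# into fixed-size sequences so short samples stop wasting the 1024-token
# context this diff raises from 256.
from typing import List

def pack_samples(sample_lengths: List[int], sequence_len: int = 1024) -> List[List[int]]:
    """Greedy first-fit packing; returns the sample lengths placed in each packed sequence."""
    bins: List[List[int]] = []
    for length in sorted(sample_lengths, reverse=True):
        for b in bins:
            if sum(b) + length <= sequence_len:
                b.append(length)
                break
        else:  # no existing sequence had room; start a new one
            bins.append([length])
    return bins

lengths = [180, 240, 512, 96, 730, 64, 300]  # made-up sample lengths
packed = pack_samples(lengths)
print(f"{len(lengths)} samples -> {len(packed)} sequences of <=1024 tokens")
```

With packing, each optimizer step sees several samples per sequence, which is part of why the learning rate drops here as well.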
24 changes: 12 additions & 12 deletions examples/openllama-3b/lora.yml
@@ -1,5 +1,5 @@
-base_model: openlm-research/open_llama_3b
-base_model_config: openlm-research/open_llama_3b
+base_model: openlm-research/open_llama_3b_v2
+base_model_config: openlm-research/open_llama_3b_v2
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 load_in_8bit: true
@@ -13,8 +13,8 @@ dataset_prepared_path: last_run_prepared
 val_set_size: 0.02
 adapter: lora
 lora_model_dir:
-sequence_len: 256
-max_packed_sequence_len:
+sequence_len: 1024
+sample_packing: true
 lora_r: 8
 lora_alpha: 16
 lora_dropout: 0.0
@@ -33,9 +33,9 @@ wandb_watch:
 wandb_run_id:
 wandb_log_model:
 output_dir: ./lora-out
-batch_size: 16
-micro_batch_size: 4
-num_epochs: 3
+gradient_accumulation_steps: 1
+micro_batch_size: 2
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 torchdistx_path:
 lr_scheduler: cosine
@@ -50,16 +50,16 @@ early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
 logging_steps: 1
-xformers_attention: true
-flash_attention:
+xformers_attention:
+flash_attention: true
 gptq_groupsize:
 gptq_model_v1:
-warmup_steps: 10
-eval_steps: 50
+warmup_steps: 20
+eval_steps: 0.05
 save_steps:
 debug:
 deepspeed:
-weight_decay: 0.0
+weight_decay: 0.1
 fsdp:
 fsdp_config:
 special_tokens:
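The LoRA config also swaps the ambiguous batch_size: 16 for an explicit gradient_accumulation_steps: 1 with micro_batch_size: 2, and raises weight_decay to 0.1. The unchanged adapter pair lora_r: 8 / lora_alpha: 16 means adapter updates are scaled by alpha / r = 2. A small sketch of that standard LoRA arithmetic follows; axolotl delegates this to the peft library, and the 3200 hidden size is an assumption taken from the 3B model family.

```python
# Standard LoRA math behind the `lora_r: 8` / `lora_alpha: 16` pair kept in
# this diff (illustration only; peft implements this inside the model).
import numpy as np

r, alpha = 8, 16
d_in, d_out = 3200, 3200              # open_llama_3b hidden size (assumption)

A = np.random.randn(r, d_in) * 0.01   # low-rank factor A, trained
B = np.zeros((d_out, r))              # low-rank factor B, zero-initialized

# The frozen weight W is perturbed by a rank-r update scaled by alpha / r,
# so alpha = 2 * r gives the adapter an effective gain of 2. At init the
# update is zero because B is zero, so training starts from the base model.
delta_W = (alpha / r) * (B @ A)
print(delta_W.shape, "scaling:", alpha / r)
```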
30 changes: 15 additions & 15 deletions examples/openllama-3b/qlora.yml
@@ -1,5 +1,5 @@
-base_model: openlm-research/open_llama_3b
-base_model_config: openlm-research/open_llama_3b
+base_model: openlm-research/open_llama_3b_v2
+base_model_config: openlm-research/open_llama_3b_v2
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 load_in_8bit: false
@@ -13,8 +13,8 @@ dataset_prepared_path: last_run_prepared
 val_set_size: 0.01
 adapter: qlora
 lora_model_dir:
-sequence_len: 2048
-max_packed_sequence_len: 2048
+sequence_len: 1024
+sample_packing: true
 lora_r: 8
 lora_alpha: 32
 lora_dropout: 0.05
@@ -27,33 +27,33 @@ wandb_watch:
 wandb_run_id:
 wandb_log_model:
 output_dir: ./qlora-out
-batch_size: 4
-micro_batch_size: 4
-num_epochs: 2
+gradient_accumulation_steps: 1
+micro_batch_size: 2
+num_epochs: 4
 optimizer: paged_adamw_32bit
 torchdistx_path:
 lr_scheduler: cosine
 learning_rate: 0.0002
 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
-tf32: true
+bf16: false
+fp16: true
+tf32: false
 gradient_checkpointing: true
 early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
 logging_steps: 1
-xformers_attention: true
-flash_attention:
+xformers_attention:
+flash_attention: true
 gptq_groupsize:
 gptq_model_v1:
-warmup_steps: 10
-eval_steps: 20
+warmup_steps: 20
+eval_steps: 0.05
 save_steps:
 debug:
 deepspeed:
-weight_decay: 0.0
+weight_decay: 0.1
 fsdp:
 fsdp_config:
 special_tokens:
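The QLoRA config additionally flips bf16/tf32 to fp16, which also runs on pre-Ampere GPUs. All three configs move from absolute eval_steps (50, 50, 20) to eval_steps: 0.05; in the Hugging Face TrainingArguments that axolotl builds on, step arguments below 1 are treated as a ratio of total training steps, so evaluation runs every 5% of training regardless of dataset size. A rough sketch of that arithmetic under assumed dataset numbers:

```python
# Why `eval_steps: 0.05` works: values below 1 are interpreted as a fraction
# of total training steps (HF TrainingArguments semantics). All of the
# dataset and hardware numbers below are assumptions for illustration.
packed_sequences = 20_000      # sequences after packing (assumption)
micro_batch_size = 2
gradient_accumulation_steps = 1
world_size = 1                 # single GPU (assumption)
num_epochs = 4

effective_batch = micro_batch_size * gradient_accumulation_steps * world_size
total_steps = (packed_sequences // effective_batch) * num_epochs
eval_every = max(1, int(total_steps * 0.05))
print(f"total optimizer steps: {total_steps}, eval every {eval_every} steps")
```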