
Commit

revert some changes now that caching is fixed
winglian committed Jan 9, 2024
1 parent f01ca4f commit a8d3c31
Showing 12 changed files with 2 additions and 21 deletions.
4 changes: 2 additions & 2 deletions docker/Dockerfile-tests
@@ -24,9 +24,9 @@ RUN git fetch origin +$GITHUB_REF && \

# If AXOLOTL_EXTRAS is set, append it in brackets
RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
- pip install -e .[deepspeed,flash-attn,$AXOLOTL_EXTRAS]; \
+ pip install -e .[deepspeed,flash-attn,mamba-ssm,$AXOLOTL_EXTRAS]; \
else \
- pip install -e .[deepspeed,flash-attn]; \
+ pip install -e .[deepspeed,flash-attn,mamba-ssm]; \
fi

# So we can test the Docker image
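
For context, a hedged sketch of how the AXOLOTL_EXTRAS value referenced above is assumed to reach this Dockerfile, namely as a Docker build argument; the image tag and the extra name are illustrative only, and further build args (e.g. GITHUB_REF, which the fetch step above uses) may also be required:

    # illustrative build of the test image with an optional extra (tag and extra name are placeholders)
    docker build -f docker/Dockerfile-tests \
        --build-arg AXOLOTL_EXTRAS="auto-gptq" \
        -t axolotl-tests:local .
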
1 change: 0 additions & 1 deletion tests/e2e/test_fused_llama.py
@@ -31,7 +31,6 @@ def test_fft_packing(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "JackFram/llama-68m",
"flash_attention": True,
"flash_attn_fuse_qkv": True,
3 changes: 0 additions & 3 deletions tests/e2e/test_lora_llama.py
@@ -31,7 +31,6 @@ def test_lora(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "JackFram/llama-68m",
"tokenizer_type": "LlamaTokenizer",
"sequence_len": 1024,
@@ -74,7 +73,6 @@ def test_lora_packing(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "JackFram/llama-68m",
"tokenizer_type": "LlamaTokenizer",
"sequence_len": 1024,
@@ -124,7 +122,6 @@ def test_lora_gptq(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "TheBlokeAI/jackfram_llama-68m-GPTQ",
"model_type": "AutoModelForCausalLM",
"tokenizer_type": "LlamaTokenizer",
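
Each of the test diffs below removes the same per-test "wandb_disabled" key from the config dict. As a usage note, a hedged example of running one of these e2e modules locally with a standard pytest invocation (not part of this commit; the tests are assumed to need a GPU and network access to pull the Hub models named in the configs):

    # run the LoRA e2e tests touched above
    pytest -v tests/e2e/test_lora_llama.py
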
1 change: 0 additions & 1 deletion tests/e2e/test_mamba.py
@@ -29,7 +29,6 @@ def test_fft(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "state-spaces/mamba-130m",
"model_type": "MambaLMHeadModel",
"tokenizer_type": "AutoTokenizer",
2 changes: 0 additions & 2 deletions tests/e2e/test_mistral.py
@@ -31,7 +31,6 @@ def test_lora(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "openaccess-ai-collective/tiny-mistral",
"flash_attention": True,
"sequence_len": 1024,
@@ -77,7 +76,6 @@ def test_ft(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "openaccess-ai-collective/tiny-mistral",
"flash_attention": True,
"sequence_len": 1024,
2 changes: 0 additions & 2 deletions tests/e2e/test_mistral_samplepack.py
@@ -31,7 +31,6 @@ def test_lora_packing(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "openaccess-ai-collective/tiny-mistral",
"flash_attention": True,
"sample_packing": True,
@@ -78,7 +77,6 @@ def test_ft_packing(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "openaccess-ai-collective/tiny-mistral",
"flash_attention": True,
"sample_packing": True,
2 changes: 0 additions & 2 deletions tests/e2e/test_mixtral.py
@@ -31,7 +31,6 @@ def test_qlora(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "hf-internal-testing/Mixtral-tiny",
"tokenizer_config": "mistralai/Mixtral-8x7B-v0.1",
"flash_attention": True,
@@ -74,7 +73,6 @@ def test_ft(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "hf-internal-testing/Mixtral-tiny",
"tokenizer_config": "mistralai/Mixtral-8x7B-v0.1",
"flash_attention": True,
2 changes: 0 additions & 2 deletions tests/e2e/test_mixtral_samplepack.py
@@ -31,7 +31,6 @@ def test_qlora(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "hf-internal-testing/Mixtral-tiny",
"tokenizer_config": "mistralai/Mixtral-8x7B-v0.1",
"flash_attention": True,
@@ -79,7 +78,6 @@ def test_ft(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "hf-internal-testing/Mixtral-tiny",
"tokenizer_config": "mistralai/Mixtral-8x7B-v0.1",
"flash_attention": True,
2 changes: 0 additions & 2 deletions tests/e2e/test_model_patches.py
@@ -21,7 +21,6 @@ class TestModelPatches(unittest.TestCase):
def test_mixtral_multipack(self, temp_dir):
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "hf-internal-testing/Mixtral-tiny",
"tokenizer_config": "mistralai/Mixtral-8x7B-v0.1",
"flash_attention": True,
@@ -65,7 +64,6 @@ def test_mixtral_multipack(self, temp_dir):
def test_mistral_multipack(self, temp_dir):
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "openaccess-ai-collective/tiny-mistral",
"flash_attention": True,
"sample_packing": True,
2 changes: 0 additions & 2 deletions tests/e2e/test_phi.py
@@ -33,7 +33,6 @@ def test_phi2_ft(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "microsoft/phi-2",
"trust_remote_code": True,
"model_type": "AutoModelForCausalLM",
@@ -85,7 +84,6 @@ def test_ft_packed(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "microsoft/phi-2",
"trust_remote_code": True,
"model_type": "PhiForCausalLM",
1 change: 0 additions & 1 deletion tests/e2e/test_resume.py
@@ -33,7 +33,6 @@ def test_resume_qlora(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"wandb_disabled": True,
"base_model": "JackFram/llama-68m",
"tokenizer_type": "LlamaTokenizer",
"sequence_len": 1024,
1 change: 0 additions & 1 deletion tests/test_validation.py
@@ -721,7 +721,6 @@ def test_add_tokens_adapter(self):
validate_config(cfg)


- @pytest.mark.skip(reason="skip wandb validation for docker e2e")
class ValidationWandbTest(ValidationTest):
"""
Validation test for wandb
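
With the per-test "wandb_disabled" keys removed and the wandb validation tests re-enabled above, W&B can still be switched off globally for a test run if needed; a hedged sketch using W&B's standard environment toggles (whether the removed config key mapped to these variables is an assumption, not something stated in this commit):

    # disable W&B logging for the whole run
    export WANDB_DISABLED=true
    # or keep runs local without uploading
    export WANDB_MODE=offline
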
