From 1c071a703c2358156a745ae1f050f109fbd0fc8b Mon Sep 17 00:00:00 2001 From: Merve Noyan Date: Fri, 19 Jul 2024 11:13:04 +0300 Subject: [PATCH 1/2] Fixes --- docs/source/en/model_doc/chameleon.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/docs/source/en/model_doc/chameleon.md b/docs/source/en/model_doc/chameleon.md index fb524b324794f8..6505ddaf4a7ba7 100644 --- a/docs/source/en/model_doc/chameleon.md +++ b/docs/source/en/model_doc/chameleon.md @@ -34,13 +34,13 @@ being competitive with models such as Mixtral 8x7B and Gemini-Pro, and performs generation, all in a single model. It also matches or exceeds the performance of much larger models, including Gemini Pro and GPT-4V, according to human judgments on a new long-form mixed-modal generation evaluation, where either the prompt or outputs contain mixed sequences of both images and -text. Chameleon marks a significant step forward in a unified modeling of full multimodal documents* +text. Chameleon marks a significant step forward in unified modeling of full multimodal documents* drawing - Chameleon incorporates a vector quantizer module to transform images into discrete tokens. That also enables image geenration using an auto-regressive transformer. Taken from the original paper. + Chameleon incorporates a vector quantizer module to transform images into discrete tokens. That also enables image generation using an auto-regressive transformer. Taken from the original paper. This model was contributed by [joaogante](https://huggingface.co/joaogante) and [RaushanTurganbay](https://huggingface.co/RaushanTurganbay). The original code can be found [here](https://github.com/facebookresearch/chameleon). @@ -61,6 +61,7 @@ The original code can be found [here](https://github.com/facebookresearch/chamel ### Single image inference +Chameleon is a gated model, so make sure you have access to it and are logged in to the Hugging Face Hub with a token. 
Here's how to load the model and perform inference in half-precision (`torch.float16`): ```python @@ -69,8 +70,8 @@ import torch from PIL import Image import requests -processor = ChameleonProcessor.from_pretrained("meta-chameleon") -model = ChameleonForConditionalGeneration.from_pretrained("meta-chameleon", torch_dtype=torch.float16, device_map="auto") +processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") +model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="auto") # prepare image and text prompt url = "https://bjiujitsu.com/wp-content/uploads/2021/01/jiu_jitsu_belt_white_1.jpg" @@ -94,8 +95,8 @@ import torch from PIL import Image import requests -processor = ChameleonProcessor.from_pretrained("meta-chameleon") -model = ChameleonForConditionalGeneration.from_pretrained("meta-chameleon", torch_dtype=torch.float16, device_map="auto") +processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") +model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="auto") # Get three different images url = "https://www.ilankelman.org/stopsigns/australia.jpg" @@ -138,7 +139,7 @@ quantization_config = BitsAndBytesConfig( bnb_4bit_compute_dtype=torch.float16, ) -model = ChameleonForConditionalGeneration.from_pretrained("meta-chameleon", quantization_config=quantization_config, device_map="auto") +model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="auto") ``` ### Use Flash-Attention 2 and SDPA to further speed-up generation @@ -148,6 +149,7 @@ The models supports both, Flash-Attention 2 and PyTorch's [`torch.nn.functional. 
```python from transformers import ChameleonForConditionalGeneration +model_id = "facebook/chameleon-7b" model = ChameleonForConditionalGeneration.from_pretrained( model_id, torch_dtype=torch.float16, From 5e0c823f6b09a448572549f46027170d5f351a6e Mon Sep 17 00:00:00 2001 From: Merve Noyan Date: Fri, 19 Jul 2024 11:50:11 +0300 Subject: [PATCH 2/2] Let's not use auto --- docs/source/en/model_doc/chameleon.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/en/model_doc/chameleon.md b/docs/source/en/model_doc/chameleon.md index fecd64809aaca9..9b316c772e1041 100644 --- a/docs/source/en/model_doc/chameleon.md +++ b/docs/source/en/model_doc/chameleon.md @@ -71,7 +71,7 @@ from PIL import Image import requests processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") -model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="auto") +model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="cuda") # prepare image and text prompt url = 'http://images.cocodataset.org/val2017/000000039769.jpg' @@ -97,7 +97,7 @@ import requests processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") -model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="auto") +model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="cuda") # Get three different images url = "https://www.ilankelman.org/stopsigns/australia.jpg" @@ -140,7 +140,7 @@ quantization_config = BitsAndBytesConfig( bnb_4bit_compute_dtype=torch.float16, ) -model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="auto") +model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, 
device_map="cuda") ``` ### Use Flash-Attention 2 and SDPA to further speed-up generation