
# -*- coding: utf-8 -*-
"""llava.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1r5WzWBC7zElKEtIB7XOMCkIwmk0ow1AA

https://huggingface.co/docs/transformers/model_doc/llava
"""

!pip install git+https://github.com/huggingface/transformers

!python --version
!for p in transformers pillow requests IPython; do pip list | grep "^$p[ \t]"; done

"""Python 3.10.12

transformers                       4.48.0.dev0
pillow                             11.0.0
requests                           2.32.3


"""

from PIL import Image
import requests
from transformers import AutoProcessor, LlavaForConditionalGeneration
from IPython.display import display

# Load the model weights and the matching processor from the Hugging Face Hub.
# By default this loads full-precision (fp32) weights, roughly 28 GB for the 7B model.
model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf")
processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
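
# A memory-friendlier variant (not part of the original notebook): load the
# weights in half precision and let accelerate place them on the GPU. This
# assumes `accelerate` is installed; uncomment to use it instead of the fp32
# load above. Note that `inputs` would then also need moving to the GPU with
# `inputs.to(model.device)` before calling generate().
#
# import torch
#
# model = LlavaForConditionalGeneration.from_pretrained(
#     "llava-hf/llava-1.5-7b-hf",
#     torch_dtype=torch.float16,   # halves memory vs. the default fp32
#     device_map="auto",           # requires `pip install accelerate`
# )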

# The prompt must contain the `<image>` placeholder token, which marks where the
# image features are inserted; recent processor versions error without it.
prompt = "<image>\nUSER: What's the content of the image?\nASSISTANT:"
# Fetch the example image and display it inline in the notebook.
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image = Image.open(requests.get(url, stream=True).raw)
display(image)

# Preprocess: tokenizes the prompt and converts the image into pixel_values tensors.
inputs = processor(text=prompt, images=image, return_tensors="pt")
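
# A quick sanity check (an addition, not in the original notebook): inspect the
# tensors the processor produced — input_ids, attention_mask, and pixel_values.
for name, tensor in inputs.items():
    print(name, tuple(tensor.shape))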

# Generate up to 10 new tokens (increase max_new_tokens for longer answers).
# Alternative: cap the total sequence length (prompt + completion) instead:
# generate_ids = model.generate(**inputs, max_length=30)
generate_ids = model.generate(**inputs, max_new_tokens=10)
# Decode the generated ids (prompt plus new tokens) back into text.
result = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
print(result)
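
# The hand-written prompt string above is specific to llava-1.5. Recent
# transformers releases and the llava-hf checkpoints ship a chat template, so
# the processor can build the prompt from a chat-style conversation instead,
# which is less error-prone. A minimal sketch, reusing the `model`, `processor`,
# and `image` objects from above:

conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What's the content of the image?"},
        ],
    }
]
# Render the conversation into the model's expected prompt format.
chat_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
chat_inputs = processor(text=chat_prompt, images=image, return_tensors="pt").to(model.device)
chat_ids = model.generate(**chat_inputs, max_new_tokens=30)
print(processor.batch_decode(chat_ids, skip_special_tokens=True)[0])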