[Bugfix] Fix modality limits in vision language example #17721

Merged · 1 commit · May 6, 2025

Changes from all commits
examples/offline_inference/vision_language.py: 72 changes (36 additions, 36 deletions)
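Every hunk below makes the same one-line change: the per-prompt multimodal limit passed to EngineArgs is keyed by the modality argument each runner already receives, rather than hardcoded to "image". Before the fix, a runner invoked with modality="video" still declared an image-only limit. A minimal sketch of the corrected pattern, using a hypothetical runner name (the model and other arguments are illustrative, not taken from the diff):

from vllm import EngineArgs


def run_example(questions: list[str], modality: str) -> EngineArgs:
    # Before: limit_mm_per_prompt={"image": 1} declared an image limit even
    # when the caller asked for video inputs.
    # After: key the limit by the requested modality, so a video run declares
    # {"video": 1} and an image run declares {"image": 1}.
    return EngineArgs(
        model="llava-hf/LLaVA-NeXT-Video-7B-hf",
        max_model_len=8192,
        max_num_seqs=2,
        limit_mm_per_prompt={modality: 1},
    )
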
@@ -45,7 +45,7 @@ def run_aria(questions: list[str], modality: str) -> ModelRequestData:
         max_model_len=4096,
         max_num_seqs=2,
         dtype="bfloat16",
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [(f"<|im_start|>user\n<fim_prefix><|img|><fim_suffix>{question}"
@@ -71,7 +71,7 @@ def run_aya_vision(questions: list[str], modality: str) -> ModelRequestData:
         max_model_len=2048,
         max_num_seqs=2,
         mm_processor_kwargs={"crop_to_patches": True},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )
     prompts = [
         f"<|START_OF_TURN_TOKEN|><|USER_TOKEN|><image>{question}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
@@ -92,7 +92,7 @@ def run_blip2(questions: list[str], modality: str) -> ModelRequestData:
     prompts = [f"Question: {question} Answer:" for question in questions]
     engine_args = EngineArgs(
         model="Salesforce/blip2-opt-6.7b",
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -110,7 +110,7 @@ def run_chameleon(questions: list[str], modality: str) -> ModelRequestData:
         model="facebook/chameleon-7b",
         max_model_len=4096,
         max_num_seqs=2,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -130,7 +130,7 @@ def run_deepseek_vl2(questions: list[str], modality: str) -> ModelRequestData:
         max_model_len=4096,
         max_num_seqs=2,
         hf_overrides={"architectures": ["DeepseekVLV2ForCausalLM"]},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [
@@ -155,7 +155,7 @@ def run_florence2(questions: list[str], modality: str) -> ModelRequestData:
         max_num_seqs=2,
         trust_remote_code=True,
         dtype="bfloat16",
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = ["<MORE_DETAILED_CAPTION>" for _ in questions]
@@ -175,7 +175,7 @@ def run_fuyu(questions: list[str], modality: str) -> ModelRequestData:
         model="adept/fuyu-8b",
         max_model_len=2048,
         max_num_seqs=2,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -194,7 +194,7 @@ def run_gemma3(questions: list[str], modality: str) -> ModelRequestData:
         max_model_len=2048,
         max_num_seqs=2,
         mm_processor_kwargs={"do_pan_and_scan": True},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [("<bos><start_of_turn>user\n"
@@ -219,7 +219,7 @@ def run_glm4v(questions: list[str], modality: str) -> ModelRequestData:
         trust_remote_code=True,
         enforce_eager=True,
         hf_overrides={"architectures": ["GLM4VForCausalLM"]},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [
@@ -246,7 +246,7 @@ def run_h2ovl(questions: list[str], modality: str) -> ModelRequestData:
         model=model_name,
         trust_remote_code=True,
         max_model_len=8192,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     tokenizer = AutoTokenizer.from_pretrained(model_name,
@@ -287,7 +287,7 @@ def run_idefics3(questions: list[str], modality: str) -> ModelRequestData:
                 "longest_edge": 3 * 364
             },
         },
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )
     prompts = [(
         f"<|begin_of_text|>User:<image>{question}<end_of_utterance>\nAssistant:"
@@ -314,7 +314,7 @@ def run_smolvlm(questions: list[str], modality: str) -> ModelRequestData:
                 "longest_edge": 384
             },
         },
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )
     prompts = [
         (f"<|im_start|>User:<image>{question}<end_of_utterance>\nAssistant:")
@@ -337,7 +337,7 @@ def run_internvl(questions: list[str], modality: str) -> ModelRequestData:
         model=model_name,
         trust_remote_code=True,
         max_model_len=4096,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     tokenizer = AutoTokenizer.from_pretrained(model_name,
@@ -378,7 +378,7 @@ def run_kimi_vl(questions: list[str], modality: str) -> ModelRequestData:
         model="moonshotai/Kimi-VL-A3B-Instruct",
         trust_remote_code=True,
         max_model_len=4096,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -398,7 +398,7 @@ def run_llava(questions: list[str], modality: str) -> ModelRequestData:
     engine_args = EngineArgs(
         model="llava-hf/llava-1.5-7b-hf",
         max_model_len=4096,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -415,7 +415,7 @@ def run_llava_next(questions: list[str], modality: str) -> ModelRequestData:
     engine_args = EngineArgs(
         model="llava-hf/llava-v1.6-mistral-7b-hf",
         max_model_len=8192,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -437,7 +437,7 @@ def run_llava_next_video(questions: list[str],
         model="llava-hf/LLaVA-NeXT-Video-7B-hf",
         max_model_len=8192,
         max_num_seqs=2,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -465,7 +465,7 @@ def run_llava_onevision(questions: list[str],
     engine_args = EngineArgs(
         model="llava-hf/llava-onevision-qwen2-7b-ov-hf",
         max_model_len=16384,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -488,7 +488,7 @@ def run_mantis(questions: list[str], modality: str) -> ModelRequestData:
         model="TIGER-Lab/Mantis-8B-siglip-llama3",
         max_model_len=4096,
         hf_overrides={"architectures": ["MantisForConditionalGeneration"]},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )
     stop_token_ids = [128009]

@@ -529,7 +529,7 @@ def run_minicpmv_base(questions: list[str], modality: str, model_name):
         max_model_len=4096,
         max_num_seqs=2,
         trust_remote_code=True,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )
     # NOTE The stop_token_ids are different for various versions of MiniCPM-V
     # 2.0
@@ -584,7 +584,7 @@ def run_mistral3(questions: list[str], modality: str) -> ModelRequestData:
         max_model_len=8192,
         max_num_seqs=2,
         tensor_parallel_size=2,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [f"<s>[INST]{question}\n[IMG][/INST]" for question in questions]
Expand All @@ -610,7 +610,7 @@ def run_mllama(questions: list[str], modality: str) -> ModelRequestData:
model=model_name,
max_model_len=8192,
max_num_seqs=2,
limit_mm_per_prompt={"image": 1},
limit_mm_per_prompt={modality: 1},
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -645,7 +645,7 @@ def run_llama4(questions: list[str], modality: str) -> ModelRequestData:
         max_num_seqs=4,
         tensor_parallel_size=8,
         gpu_memory_utilization=0.4,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -680,7 +680,7 @@ def run_molmo(questions: list[str], modality: str) -> ModelRequestData:
         model=model_name,
         trust_remote_code=True,
         dtype="bfloat16",
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [
@@ -706,7 +706,7 @@ def run_nvlm_d(questions: list[str], modality: str) -> ModelRequestData:
         trust_remote_code=True,
         max_model_len=4096,
         tensor_parallel_size=4,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     tokenizer = AutoTokenizer.from_pretrained(model_name,
@@ -738,7 +738,7 @@ def run_ovis2(questions: list[str], modality: str) -> ModelRequestData:
         trust_remote_code=True,
         dtype="half",
         hf_overrides={"architectures": ["Ovis2ForConditionalGeneration"]},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     placeholder = "<image>\n"
@@ -761,7 +761,7 @@ def run_paligemma(questions: list[str], modality: str) -> ModelRequestData:
     prompts = ["caption en" for _ in questions]
     engine_args = EngineArgs(
         model="google/paligemma-3b-mix-224",
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -778,7 +778,7 @@ def run_paligemma2(questions: list[str], modality: str) -> ModelRequestData:
     prompts = ["caption en" for _ in questions]
     engine_args = EngineArgs(
         model="google/paligemma2-3b-ft-docci-448",
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -815,7 +815,7 @@ def run_phi3v(questions: list[str], modality: str) -> ModelRequestData:
         max_num_seqs=2,
         # Note - mm_processor_kwargs can also be passed to generate/chat calls
         mm_processor_kwargs={"num_crops": 16},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -849,7 +849,7 @@ def run_phi4mm(questions: list[str], modality: str) -> ModelRequestData:
         max_lora_rank=320,
         # Note - mm_processor_kwargs can also be passed to generate/chat calls
         mm_processor_kwargs={"dynamic_hd": 16},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     return ModelRequestData(
@@ -870,7 +870,7 @@ def run_pixtral_hf(questions: list[str], modality: str) -> ModelRequestData:
         model=model_name,
         max_model_len=6144,
         max_num_seqs=2,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [f"<s>[INST]{question}\n[IMG][/INST]" for question in questions]
@@ -891,7 +891,7 @@ def run_qwen_vl(questions: list[str], modality: str) -> ModelRequestData:
         max_model_len=1024,
         max_num_seqs=2,
         hf_overrides={"architectures": ["QwenVLForConditionalGeneration"]},
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     prompts = [f"{question}Picture 1: <img></img>\n" for question in questions]
@@ -916,7 +916,7 @@ def run_qwen2_vl(questions: list[str], modality: str) -> ModelRequestData:
             "min_pixels": 28 * 28,
             "max_pixels": 1280 * 28 * 28,
         },
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     if modality == "image":
@@ -951,7 +951,7 @@ def run_qwen2_5_vl(questions: list[str], modality: str) -> ModelRequestData:
             "max_pixels": 1280 * 28 * 28,
             "fps": 1,
         },
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     if modality == "image":
@@ -985,7 +985,7 @@ def run_qwen2_5_omni(questions: list[str], modality: str):
             "max_pixels": 1280 * 28 * 28,
             "fps": [1],
         },
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     if modality == "image":
@@ -1018,7 +1018,7 @@ def run_skyworkr1v(questions: list[str], modality: str) -> ModelRequestData:
         model=model_name,
         trust_remote_code=True,
         max_model_len=4096,
-        limit_mm_per_prompt={"image": 1},
+        limit_mm_per_prompt={modality: 1},
     )

     tokenizer = AutoTokenizer.from_pretrained(model_name,
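A quick smoke check of the new behavior, sketched under the assumption that ModelRequestData keeps a reference to the EngineArgs it was built from (the assertion below is hypothetical and not part of the PR):

# Hypothetical check: a video runner should now declare a video limit.
req = run_llava_next_video(["Why is this video funny?"], modality="video")
assert req.engine_args.limit_mm_per_prompt == {"video": 1}  # was {"image": 1}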