From 4fac7d4eaa4528a3f814732e8f1e60ed4c11566a Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Wed, 30 Apr 2025 18:53:56 +0200
Subject: [PATCH] add test

---
 examples/llava/README.md | 10 ++++++++++
 examples/llava/tests.sh  | 16 +++++++++-------
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/examples/llava/README.md b/examples/llava/README.md
index f58d9de710..cd71116c18 100644
--- a/examples/llava/README.md
+++ b/examples/llava/README.md
@@ -34,6 +34,16 @@ llama-mtmd-cli -hf ggml-org/SmolVLM2-500M-Video-Instruct-GGUF
 
 # Pixtral 12B
 llama-mtmd-cli -hf ggml-org/pixtral-12b-GGUF
+
+# Qwen 2 VL
+llama-mtmd-cli -hf ggml-org/Qwen2-VL-2B-Instruct-GGUF
+llama-mtmd-cli -hf ggml-org/Qwen2-VL-7B-Instruct-GGUF
+
+# Qwen 2.5 VL
+llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-3B-Instruct-GGUF
+llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-7B-Instruct-GGUF
+llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-32B-Instruct-GGUF
+llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-72B-Instruct-GGUF
 ```
 
 ## How it works and what is `mmproj`?
diff --git a/examples/llava/tests.sh b/examples/llava/tests.sh
index 75604315cf..5030a0bead 100755
--- a/examples/llava/tests.sh
+++ b/examples/llava/tests.sh
@@ -36,12 +36,6 @@ add_test() {
     arr_tmpl+=("$tmpl")
 }
 
-add_test_big() {
-    if [ "$RUN_BIG_TESTS" = true ]; then
-        add_test "$@"
-    fi
-}
-
 add_test "llama-mtmd-cli" "ggml-org/SmolVLM-500M-Instruct-GGUF:Q8_0"
 add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M"
 add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0"
@@ -58,7 +52,15 @@ add_test "llama-mtmd-cli" "bartowski/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
 add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
 
 # to test the big models, run: ./tests.sh big
-add_test_big "llama-mtmd-cli" "ggml-org/pixtral-12b-GGUF:Q4_K_M"
+if [ "$RUN_BIG_TESTS" = true ]; then
+    add_test "llama-mtmd-cli" "ggml-org/pixtral-12b-GGUF:Q4_K_M"
+    add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
+    add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-7B-Instruct-GGUF:Q4_K_M"
+    add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
+    add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M"
+    add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-32B-Instruct-GGUF:Q4_K_M"
+    # add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-72B-Instruct-GGUF:Q4_K_M" # too big
+fi
 
 # these models always give the wrong answer, not sure why
 # add_test "llama-mtmd-cli" "ggml-org/SmolVLM-Instruct-GGUF:Q4_K_M"
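
A minimal sketch of how the changes above could be exercised once the patch is applied. The `./tests.sh big` invocation and the model repository come from the patch itself; running from `examples/llava` with `llama-mtmd-cli` already built and on `PATH` are assumptions.

```sh
# run the default (small) test set
cd examples/llava && ./tests.sh

# also run the big models gated behind RUN_BIG_TESTS, per the in-script comment
./tests.sh big

# or try one of the newly documented models directly, as in the README addition
llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-3B-Instruct-GGUF
```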