This commit is contained in:
Xuan Son Nguyen 2025-04-30 18:53:56 +02:00
parent b5e72ed350
commit 4fac7d4eaa
2 changed files with 19 additions and 7 deletions

View File

@ -34,6 +34,16 @@ llama-mtmd-cli -hf ggml-org/SmolVLM2-500M-Video-Instruct-GGUF
# Pixtral 12B
llama-mtmd-cli -hf ggml-org/pixtral-12b-GGUF
# Qwen 2 VL
llama-mtmd-cli -hf ggml-org/Qwen2-VL-2B-Instruct-GGUF
llama-mtmd-cli -hf ggml-org/Qwen2-VL-7B-Instruct-GGUF
# Qwen 2.5 VL
llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-3B-Instruct-GGUF
llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-7B-Instruct-GGUF
llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-32B-Instruct-GGUF
llama-mtmd-cli -hf ggml-org/Qwen2.5-VL-72B-Instruct-GGUF
```
## How it works and what is `mmproj`?

View File

@ -36,12 +36,6 @@ add_test() {
arr_tmpl+=("$tmpl")
}
add_test_big() {
    # Register a test case only when big-model testing is enabled
    # (RUN_BIG_TESTS=true); otherwise do nothing and succeed.
    [ "$RUN_BIG_TESTS" = true ] || return 0
    add_test "$@"
}
# Default test set: these entries are registered unconditionally
# (unlike add_test_big entries, which require RUN_BIG_TESTS=true).
add_test "llama-mtmd-cli" "ggml-org/SmolVLM-500M-Instruct-GGUF:Q8_0"
add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0"
@ -58,7 +52,15 @@ add_test "llama-mtmd-cli" "bartowski/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
# to test the big models, run: ./tests.sh big
# NOTE(review): pixtral-12b is also registered inside the RUN_BIG_TESTS
# block below, so this line looks like a duplicate registration when big
# tests are enabled — confirm whether one of the two should be removed.
add_test_big "llama-mtmd-cli" "ggml-org/pixtral-12b-GGUF:Q4_K_M"
# Big-model test set: opt-in because of download size / runtime
# (enabled by running "./tests.sh big", which sets RUN_BIG_TESTS=true).
if [ "$RUN_BIG_TESTS" = true ]; then
    add_test "llama-mtmd-cli" "ggml-org/pixtral-12b-GGUF:Q4_K_M"
    # The ggml-org Hugging Face repos carrying quantized weights use the
    # "-GGUF" suffix (cf. every other entry in this file); without it the
    # -hf lookup has no GGUF files to fetch.
    add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
    add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-7B-Instruct-GGUF:Q4_K_M"
    add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
    add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M"
    add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-32B-Instruct-GGUF:Q4_K_M"
    # add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-72B-Instruct-GGUF:Q4_K_M" # too big
fi
# these models always give the wrong answer, not sure why
# add_test "llama-mtmd-cli" "ggml-org/SmolVLM-Instruct-GGUF:Q4_K_M"