ci, tests : use cmake to download models and remove libcurl dependency (#18791)

* ci, tests : use cmake to download models and remove libcurl dependency
* llama_dl_model -> llama_download_model
* use EXPECTED_HASH for robust model downloading
* Move llama_download_model to cmake/common.cmake

Signed-off-by: Adrien Gallouët <angt@huggingface.co>
Adrien Gallouët 2026-01-14 07:46:27 +01:00 committed by GitHub
parent 6e36299b47
commit f709c7a33f
6 changed files with 41 additions and 28 deletions
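
The EXPECTED_HASH bullet leans on documented file(DOWNLOAD) behavior: when the destination file already exists and matches the given hash, CMake skips the network entirely; otherwise it downloads the file and reports a hash mismatch as an error. As a minimal sketch of that mechanism, here is the helper introduced below inlined by hand for one model (URL, path, and hash are the ones this commit uses; this expansion itself is illustrative, not part of the diff):

    # skipped entirely if the on-disk copy already matches the digest
    file(DOWNLOAD
        "https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories15M-q4_0.gguf?download=true"
        "${CMAKE_BINARY_DIR}/tinyllamas/stories15M-q4_0.gguf"
        TLS_VERIFY ON
        EXPECTED_HASH SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739
        STATUS status  # two-element list; first element is 0 on success
    )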

.github/workflows/build.yml

@@ -92,7 +92,7 @@ jobs:
         id: cmake_test
         run: |
           cd build
-          ctest -L 'main|curl' --verbose --timeout 900
+          ctest -L main --verbose --timeout 900
 
   macOS-latest-cmake-x64:
     runs-on: macos-15-intel
@@ -237,7 +237,7 @@ jobs:
         id: cmake_test
         run: |
           cd build
-          ctest -L 'main|curl' --verbose --timeout 900
+          ctest -L main --verbose --timeout 900
 
       - name: Test llama2c conversion
         id: llama2c_test
@@ -1499,7 +1499,7 @@ jobs:
         id: depends
         run: |
           sudo apt-get update
-          sudo apt-get install build-essential libcurl4-openssl-dev
+          sudo apt-get install build-essential
 
       - name: Test
         id: ggml-ci
@@ -1525,7 +1525,7 @@ jobs:
         id: depends
         run: |
           sudo apt-get update
-          sudo apt-get install build-essential libcurl4-openssl-dev
+          sudo apt-get install build-essential
 
       - name: Test
         id: ggml-ci
@@ -1551,7 +1551,7 @@ jobs:
         id: depends
         run: |
           sudo apt-get update
-          sudo apt-get install build-essential libcurl4-openssl-dev
+          sudo apt-get install build-essential
 
       - name: Test
         id: ggml-ci
@@ -1577,7 +1577,7 @@ jobs:
         id: depends
         run: |
           sudo apt-get update
-          sudo apt-get install build-essential libcurl4-openssl-dev
+          sudo apt-get install build-essential
 
       - name: Test
         id: ggml-ci
@@ -1603,7 +1603,7 @@ jobs:
         id: depends
         run: |
           sudo apt-get update
-          sudo apt-get install build-essential libcurl4-openssl-dev
+          sudo apt-get install build-essential
 
       - name: Test
         id: ggml-ci
@@ -1767,7 +1767,7 @@ jobs:
         id: depends
         run: |
           sudo apt-get update
-          sudo apt-get install -y build-essential libcurl4-openssl-dev
+          sudo apt-get install -y build-essential
 
       - name: Test
         id: ggml-ci
@@ -1853,7 +1853,7 @@ jobs:
         id: cmake_test
         run: |
           cd build
-          ctest -L 'main|curl' --verbose --timeout 900
+          ctest -L main --verbose --timeout 900
 
       - name: Test llama2c conversion
         id: llama2c_test
@@ -2129,7 +2129,6 @@ jobs:
           sudo DEBIAN_FRONTEND=noninteractive NEEDRESTART_MODE=a \
             apt-get install -y \
               build-essential \
-              libcurl4-openssl-dev \
               python3-venv \
               gpg \
               wget \

ci/run.sh

@@ -45,7 +45,7 @@
 sd=`dirname $0`
 cd $sd/../
 SRC=`pwd`
-CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=${LLAMA_FATAL_WARNINGS:-ON} -DLLAMA_CURL=ON -DGGML_SCHED_NO_REALLOC=ON"
+CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=${LLAMA_FATAL_WARNINGS:-ON} -DLLAMA_CURL=OFF -DGGML_SCHED_NO_REALLOC=ON"
 
 if [ ! -z ${GG_BUILD_METAL} ]; then
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON"

cmake/common.cmake

@@ -33,3 +33,25 @@ function(llama_add_compile_flags)
         endif()
     endif()
 endfunction()
+
+function(llama_download_model NAME HASH)
+    set(DEST "${CMAKE_BINARY_DIR}/${NAME}")
+    get_filename_component(DEST_DIR "${DEST}" DIRECTORY)
+    file(MAKE_DIRECTORY "${DEST_DIR}")
+    if(NOT EXISTS "${DEST}")
+        message(STATUS "Downloading ${NAME} from ggml-org/models...")
+    endif()
+    file(DOWNLOAD
+        "https://huggingface.co/ggml-org/models/resolve/main/${NAME}?download=true"
+        "${DEST}"
+        TLS_VERIFY ON
+        EXPECTED_HASH ${HASH}
+        STATUS status
+    )
+    list(GET status 0 code)
+    if(NOT code EQUAL 0)
+        list(GET status 1 msg)
+        message(FATAL_ERROR "Failed to download ${NAME}: ${msg}")
+    endif()
+    set(LLAMA_DOWNLOAD_MODEL "${DEST}" PARENT_SCOPE)
+endfunction()
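
For reference, this is the calling pattern the test CMake files below adopt: invoke the helper with a repo-relative file name and its hash, then point the test binary at the downloaded path through the LLAMA_DOWNLOAD_MODEL variable it sets in the caller's scope. A minimal sketch, using a real model name and hash from this commit but hypothetical test and binary names:

    # hypothetical target names; model name and hash are taken from the diffs below
    llama_download_model("tinyllamas/stories15M-q4_0.gguf"
        SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739)
    add_test(NAME my-model-test
        COMMAND my-test-binary -m "${LLAMA_DOWNLOAD_MODEL}")

Because the download now happens at configure time, the test binaries need no network access at run time, which is what allows dropping libcurl and the dedicated curl test label.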

examples/eval-callback/CMakeLists.txt

@@ -6,10 +6,8 @@ target_compile_features(${TARGET} PRIVATE cxx_std_17)
 
 set(TEST_TARGET test-eval-callback)
 if(NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
-    add_test(NAME ${TEST_TARGET}
-        COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0)
+    llama_download_model("tinyllamas/stories15M-q4_0.gguf" SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739)
 else()
-    add_test(NAME ${TEST_TARGET}
-        COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K-be.gguf --model stories260K-be.gguf --prompt hello --seed 42 -ngl 0)
+    llama_download_model("tinyllamas/stories15M-be.Q4_0.gguf" SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d)
 endif()
-set_property(TEST ${TEST_TARGET} PROPERTY LABELS eval-callback curl)
+add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback -m "${LLAMA_DOWNLOAD_MODEL}" --prompt hello --seed 42 -ngl 0)

tests/CMakeLists.txt

@@ -202,15 +202,13 @@ llama_build_and_test(
 llama_build_and_test(test-regex-partial.cpp)
 
 if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
-    llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
+    llama_download_model("tinyllamas/stories15M-q4_0.gguf" SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739)
 else()
-    llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-be.Q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
+    llama_download_model("tinyllamas/stories15M-be.Q4_0.gguf" SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d)
 endif()
+llama_build_and_test(test-thread-safety.cpp ARGS -m "${LLAMA_DOWNLOAD_MODEL}" -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
 
-# this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
-if (NOT WIN32)
-    llama_build_and_test(test-arg-parser.cpp)
-endif()
+llama_build_and_test(test-arg-parser.cpp)
 
 if (NOT LLAMA_SANITIZE_ADDRESS AND NOT GGML_SCHED_NO_REALLOC)
     # TODO: repair known memory leaks
@@ -225,11 +223,7 @@ llama_build_and_test(test-backend-sampler.cpp LABEL "model")
 
 # Test for state restore with fragmented KV cache
 # Requires a model, uses same args pattern as test-thread-safety
-if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
-    llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf)
-else()
-    llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -hf ggml-org/models -hff tinyllamas/stories15M-be.Q4_0.gguf)
-endif()
+llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -m "${LLAMA_DOWNLOAD_MODEL}")
 
 if (NOT GGML_BACKEND_DL)
     # these tests use the backends directly and cannot be built with dynamic loading

tests/test-arg-parser.cpp

@@ -173,7 +173,7 @@ int main(void) {
     assert(params.cpuparams.n_threads == 1010);
 #endif // _WIN32
 
-    printf("test-arg-parser: test curl-related functions\n\n");
+    printf("test-arg-parser: test download functions\n\n");
 
     const char * GOOD_URL = "http://ggml.ai/";
     const char * BAD_URL  = "http://ggml.ai/404";