diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 09a1769348..6c7ab71143 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -81,8 +81,7 @@ jobs:
           cmake -B build \
             -DCMAKE_BUILD_RPATH="@loader_path" \
             -DLLAMA_FATAL_WARNINGS=ON \
-            -DLLAMA_CURL=OFF \
-            -DLLAMA_BORINGSSL=ON \
+            -DLLAMA_BUILD_BORINGSSL=ON \
             -DGGML_METAL_USE_BF16=ON \
             -DGGML_METAL_EMBED_LIBRARY=OFF \
             -DGGML_METAL_SHADER_DEBUG=ON \
@@ -120,8 +119,7 @@ jobs:
           cmake -B build \
             -DCMAKE_BUILD_RPATH="@loader_path" \
             -DLLAMA_FATAL_WARNINGS=ON \
-            -DLLAMA_CURL=OFF \
-            -DLLAMA_BORINGSSL=ON \
+            -DLLAMA_BUILD_BORINGSSL=ON \
             -DGGML_METAL=OFF \
             -DGGML_RPC=ON \
             -DCMAKE_OSX_DEPLOYMENT_TARGET=13.3
@@ -1023,7 +1021,7 @@ jobs:
         id: cmake_build
         run: |
           cmake -S . -B build ${{ matrix.defines }} `
-            -DLLAMA_CURL=OFF -DLLAMA_BORINGSSL=ON
+            -DLLAMA_BUILD_BORINGSSL=ON
           cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS}
 
       - name: Add libopenblas.dll
@@ -1128,8 +1126,7 @@ jobs:
           call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
           cmake -S . -B build -G "Ninja Multi-Config" ^
             -DLLAMA_BUILD_SERVER=ON ^
-            -DLLAMA_CURL=OFF ^
-            -DLLAMA_BORINGSSL=ON ^
+            -DLLAMA_BUILD_BORINGSSL=ON ^
             -DGGML_NATIVE=OFF ^
             -DGGML_BACKEND_DL=ON ^
             -DGGML_CPU_ALL_VARIANTS=ON ^
@@ -1236,8 +1233,7 @@ jobs:
             -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
             -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/opt/rocm-${{ env.ROCM_VERSION }}/include/" `
             -DCMAKE_BUILD_TYPE=Release `
-            -DLLAMA_CURL=OFF `
-            -DLLAMA_BORINGSSL=ON `
+            -DLLAMA_BUILD_BORINGSSL=ON `
             -DROCM_DIR="${env:HIP_PATH}" `
             -DGGML_HIP=ON `
             -DGGML_HIP_ROCWMMA_FATTN=ON `
diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml
index bd8302197e..99d05226ba 100644
--- a/.github/workflows/server.yml
+++ b/.github/workflows/server.yml
@@ -74,7 +74,6 @@ jobs:
         run: |
           cmake -B build \
             -DLLAMA_BUILD_BORINGSSL=ON \
-            -DLLAMA_CURL=OFF \
             -DGGML_SCHED_NO_REALLOC=ON \
             -DGGML_SANITIZE_ADDRESS=${{ matrix.sanitizer == 'ADDRESS' }} \
             -DGGML_SANITIZE_THREAD=${{ matrix.sanitizer == 'THREAD' }} \
@@ -121,7 +120,7 @@ jobs:
       - name: Build
         id: cmake_build
         run: |
-          cmake -B build -DLLAMA_BUILD_BORINGSSL=ON -DGGML_SCHED_NO_REALLOC=ON -DLLAMA_CURL=OFF
+          cmake -B build -DLLAMA_BUILD_BORINGSSL=ON -DGGML_SCHED_NO_REALLOC=ON
           cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} --target llama-server
 
       - name: Python setup
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e69a12cae2..55f3d594db 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -114,9 +114,6 @@ option(LLAMA_TESTS_INSTALL "llama: install tests" ON)
 # 3rd party libs
 option(LLAMA_HTTPLIB "llama: httplib for downloading functionality" ON)
 option(LLAMA_OPENSSL "llama: use openssl to support HTTPS" ON)
-option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
-option(LLAMA_BORINGSSL "llama: use boringssl to support HTTPS" ON)
-option(LLAMA_LIBRESSL "llama: use libressl to support HTTPS" OFF)
 option(LLAMA_LLGUIDANCE "llama-common: include LLGuidance library for structured output in common utils" OFF)
 
 # deprecated