From ead41ff6551ecafdc53b6ac9e9f3811623c884d5 Mon Sep 17 00:00:00 2001
From: Han Yin
Date: Wed, 14 May 2025 11:49:13 -0700
Subject: [PATCH] [WIP] llama: disable OpenMP in ABI split since most SoCs are big.LITTLE

---
 examples/llama.android/llama/src/main/cpp/CMakeLists.txt | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/examples/llama.android/llama/src/main/cpp/CMakeLists.txt b/examples/llama.android/llama/src/main/cpp/CMakeLists.txt
index c310b6cba5..f125b61567 100644
--- a/examples/llama.android/llama/src/main/cpp/CMakeLists.txt
+++ b/examples/llama.android/llama/src/main/cpp/CMakeLists.txt
@@ -16,11 +16,6 @@ set(CMAKE_C_STANDARD 11 CACHE STRING "" FORCE)
 set(CMAKE_CXX_STANDARD 17 CACHE STRING "" FORCE)
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 
-# --------------------------------------------------------------------------
-# 0.b — Make the LLVM OpenMP runtime available
-# --------------------------------------------------------------------------
-find_package(OpenMP REQUIRED) # NDK’s clang ships libomp.a
-
 # --------------------------------------------------------------------------
 # 1. Locate the root of the llama.cpp source tree
 #    (six levels up from this CMakeLists.txt).
@@ -48,6 +43,7 @@ function(build_llama_tier tier march)
         -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
         # ---- llama / ggml feature switches ----------------------------
         -DGGML_LLAMAFILE=OFF
+        -DGGML_OPENMP=OFF
         -DLLAMA_BUILD_COMMON=ON
         -DLLAMA_CURL=OFF
         -DBUILD_SHARED_LIBS=OFF # we want static libs to embed
@@ -117,7 +113,6 @@ function(build_llama_tier tier march)
         ggml_core_${tier}      # umbrella (brings in few weak deps)
         ggml_cpu_core_${tier}  # back-end & scheduler
         ggml_base_core_${tier} # core math
-        OpenMP::OpenMP_CXX
         android
         log)