llama: update the library name in JNI and CMake project
parent 6dfdc2c105
commit 96817ae667
@@ -2,7 +2,7 @@ package com.example.llama.di
 
 import android.content.Context
 import android.llama.cpp.InferenceEngine
-import android.llama.cpp.KleidiLlama
+import android.llama.cpp.AiChat
 import android.llama.cpp.TierDetection
 import android.llama.cpp.gguf.GgufMetadataReader
 import com.example.llama.data.db.AppDatabase
@@ -76,7 +76,7 @@ internal abstract class AppModule {
         return if (USE_STUB_ENGINE) {
             StubInferenceEngine()
         } else {
-            KleidiLlama.getInferenceEngine(context)
+            AiChat.getInferenceEngine(context)
         }
     }
 
@@ -85,7 +85,7 @@ internal abstract class AppModule {
         return if (USE_STUB_ENGINE) {
             StubTierDetection
         } else {
-            KleidiLlama.getTierDetection(context)
+            AiChat.getTierDetection(context)
         }
     }
 
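For downstream modules, the visible API change is only the entry-point name. A minimal migration sketch, assuming nothing beyond the names in this diff (AiChat, InferenceEngine, TierDetection):

    import android.content.Context
    import android.llama.cpp.AiChat
    import android.llama.cpp.InferenceEngine
    import android.llama.cpp.TierDetection

    // Was: KleidiLlama.getInferenceEngine(context) / KleidiLlama.getTierDetection(context)
    fun wireLlama(context: Context): Pair<InferenceEngine, TierDetection> =
        AiChat.getInferenceEngine(context) to AiChat.getTierDetection(context)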
@@ -1,6 +1,6 @@
 cmake_minimum_required(VERSION 3.31.6)
 
-project("kleidi-llama" VERSION 1.0.0 LANGUAGES C CXX)
+project("ai-chat" VERSION 1.0.0 LANGUAGES C CXX)
 
 set(CMAKE_C_STANDARD 11)
 set(CMAKE_C_STANDARD_REQUIRED true)
@@ -17,8 +17,8 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}" CACHE STRING "" FORCE)
 add_subdirectory(
     ${CMAKE_CURRENT_LIST_DIR}/../../../../../../include/cpu_features
     ${CMAKE_BINARY_DIR}/cpu_features_build)
-add_library(kleidi-llama-cpu-detector SHARED cpu_detector.cpp)
-target_link_libraries(kleidi-llama-cpu-detector
+add_library(cpu-detector SHARED cpu_detector.cpp)
+target_link_libraries(cpu-detector
     PRIVATE CpuFeatures::cpu_features
     android
     log)
@@ -46,7 +46,7 @@ set(LLAMA_SRC ${CMAKE_CURRENT_LIST_DIR}/../../../../../../)
 add_subdirectory(${LLAMA_SRC} build-llama)
 
 add_library(${CMAKE_PROJECT_NAME} SHARED
-    kleidi-llama.cpp)
+    kleidi_llama.cpp)
 
 target_compile_definitions(${CMAKE_PROJECT_NAME} PRIVATE
     GGML_SYSTEM_ARCH=${GGML_SYSTEM_ARCH}
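Note on this last CMake hunk: add_library(${CMAKE_PROJECT_NAME} SHARED kleidi_llama.cpp) takes its target name from project("ai-chat"), so the built artifact changes from libkleidi-llama.so to libai-chat.so, which is exactly the file name the Kotlin loader switches to in the InferenceEngineImpl hunk below. The JNI source itself is also renamed, from kleidi-llama.cpp to kleidi_llama.cpp.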
@@ -23,7 +23,7 @@ static std::string join(const std::vector<T> &values, const std::string &delim)
 /**
  * Logging utils
  */
-#define TAG "kleidi-llama"
+#define TAG "ai-chat"
 #define LOGv(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
 #define LOGd(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
 #define LOGi(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
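With the Android log tag changed from "kleidi-llama" to "ai-chat", JNI-layer output can now be filtered with, for example:

    adb logcat -s ai-chat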
@@ -5,10 +5,10 @@ import android.llama.cpp.internal.InferenceEngineImpl
 import android.llama.cpp.internal.TierDetectionImpl
 
 /**
- * Main entry point for the Llama Android library.
+ * Main entry point for the Ai Chat library.
  * This is the only class that should be used by library consumers.
  */
-object KleidiLlama {
+object AiChat {
     /**
      * Get the inference engine single instance.
      */
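If source compatibility matters during the rename, a deprecated alias could keep old call sites compiling; this shim is a suggestion, not part of the commit:

    import android.llama.cpp.AiChat

    // Hypothetical shim: Kotlin resolves object members through a typealias,
    // so KleidiLlama.getInferenceEngine(...) keeps compiling, with a warning.
    @Deprecated("Renamed to AiChat", ReplaceWith("AiChat"))
    typealias KleidiLlama = AiChat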
@@ -9,7 +9,7 @@ interface TierDetection {
 }
 
 /**
- * ARM optimization tiers supported by the Kleidi-Llama library.
+ * ARM optimization tiers supported by this library.
  * Higher tiers provide better performance on supported hardware.
  */
 enum class LLamaTier(val rawValue: Int, val description: String) {
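The diff shows only the enum signature, not its entries, so the tier names and values below are purely illustrative of the shape the declaration implies:

    // Illustrative only; real tiers and rawValues are not visible in this commit.
    enum class LLamaTier(val rawValue: Int, val description: String) {
        BASELINE(0, "Portable Arm64 build"),
        DOTPROD(1, "Arm dot-product instructions"),
        I8MM(2, "Arm 8-bit integer matrix multiply"),
    }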
@@ -127,7 +127,7 @@ internal class InferenceEngineImpl private constructor(
         }
         _state.value = InferenceEngine.State.Initializing
         Log.i(TAG, "Loading native library...")
-        System.load(File(nativeLibDir, "libkleidi-llama.so").absolutePath)
+        System.load(File(nativeLibDir, "libai-chat.so").absolutePath)
         init(nativeLibDir)
         _state.value = InferenceEngine.State.Initialized
         Log.i(TAG, "Native library loaded! System info: \n${systemInfo()}")
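One caution around the renamed artifact: System.load throws UnsatisfiedLinkError when the file is missing, for instance if a stale build still ships libkleidi-llama.so. A hedged sketch of a guarded load; everything here beyond the libai-chat.so name is an assumption, not code from this commit:

    import java.io.File

    fun loadNativeLibrary(nativeLibDir: File) {
        val lib = File(nativeLibDir, "libai-chat.so")
        try {
            System.load(lib.absolutePath)
        } catch (e: UnsatisfiedLinkError) {
            // Surface a clearer message than the raw linker error.
            error("Failed to load ${lib.absolutePath}: ${e.message}")
        }
    }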
@@ -23,7 +23,7 @@ internal class TierDetectionImpl private constructor(
         private val TAG = TierDetectionImpl::class.simpleName
 
         // CPU feature detection preferences
-        private const val DATASTORE_CPU_DETECTION = "llama_cpu_detection"
+        private const val DATASTORE_CPU_DETECTION = "cpu-detection"
         private const val DATASTORE_VERSION = 1
         private val Context.llamaTierDataStore: DataStore<Preferences>
             by preferencesDataStore(name = DATASTORE_CPU_DETECTION)
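For context, a plausible read/write pattern around this preferences store. Only the store name ("cpu-detection") and the DATASTORE_VERSION constant appear in the diff; the key names and the version-gated invalidation are assumptions:

    import androidx.datastore.core.DataStore
    import androidx.datastore.preferences.core.Preferences
    import androidx.datastore.preferences.core.edit
    import androidx.datastore.preferences.core.intPreferencesKey
    import kotlinx.coroutines.flow.first

    // Hypothetical keys; not visible in this commit.
    private val KEY_VERSION = intPreferencesKey("version")
    private val KEY_TIER = intPreferencesKey("tier")

    suspend fun readCachedTier(store: DataStore<Preferences>, currentVersion: Int): Int? {
        val prefs = store.data.first()
        // Invalidate the cache when the detection schema version changes.
        return if (prefs[KEY_VERSION] == currentVersion) prefs[KEY_TIER] else null
    }

    suspend fun writeCachedTier(store: DataStore<Preferences>, currentVersion: Int, tier: Int) {
        store.edit { prefs ->
            prefs[KEY_VERSION] = currentVersion
            prefs[KEY_TIER] = tier
        }
    }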
@@ -95,7 +95,7 @@ internal class TierDetectionImpl private constructor(
     private fun performOptimalTierDetection(): LLamaTier? {
         try {
             // Load CPU detection library
-            System.loadLibrary("kleidi-llama-cpu-detector")
+            System.loadLibrary("cpu-detector")
             Log.i(TAG, "CPU feature detector loaded successfully")
 
             // Detect optimal tier
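Since performOptimalTierDetection() returns LLamaTier?, a failed detector load presumably falls back to null. A hedged sketch of that guard; note that System.loadLibrary("cpu-detector") resolves to the libcpu-detector.so produced by the add_library(cpu-detector ...) rule above:

    fun tryLoadCpuDetector(): Boolean =
        try {
            System.loadLibrary("cpu-detector") // maps to libcpu-detector.so
            true
        } catch (e: UnsatisfiedLinkError) {
            false
        }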