Merge branch 'ggml-org:master' into Kimi-Linear
commit 217e7ce45a
@@ -1,30 +0,0 @@
name: 'Windows - Setup CURL'
description: 'Composite action, to be reused in other workflow'
inputs:
  curl_version:
    description: 'CURL version'
    required: false
    default: '8.6.0_6'
  architecture:
    description: 'Architecture of the libcurl to download'
    required: false
    default: 'win64'
outputs:
  curl_path:
    description: "Path to the downloaded libcurl"
    value: ${{ steps.get_libcurl.outputs.curl_path }}

runs:
  using: "composite"
  steps:
    - name: libCURL
      id: get_libcurl
      shell: powershell
      env:
        CURL_VERSION: ${{ inputs.curl_version }}
        ARCHITECTURE: ${{ inputs.architecture }}
      run: |
        curl.exe -o $env:RUNNER_TEMP/curl.zip -L "https://curl.se/windows/dl-${env:CURL_VERSION}/curl-${env:CURL_VERSION}-${env:ARCHITECTURE}-mingw.zip"
        mkdir $env:RUNNER_TEMP/libcurl
        tar.exe -xvf $env:RUNNER_TEMP/curl.zip --strip-components=1 -C $env:RUNNER_TEMP/libcurl
        echo "curl_path=$env:RUNNER_TEMP/libcurl" >> $env:GITHUB_OUTPUT
@@ -1463,12 +1463,14 @@ jobs:
"${{ steps.cann-image.outputs.image }}" \
bash -lc '
set -e
yum install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs git gcc gcc-c++ make cmake libcurl-devel
yum install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs git gcc gcc-c++ make cmake openssl-devel
yum clean all && rm -rf /var/cache/yum
git config --global --add safe.directory "/workspace"
export LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/$(uname -m)-linux/devlib/:${LD_LIBRARY_PATH}
cmake -S . -B build \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DLLAMA_CURL=OFF \
-DLLAMA_OPENSSL=ON \
-DGGML_CANN=on \
-DSOC_TYPE=${SOC_TYPE}
cmake --build build -j $(nproc)
@@ -37,13 +37,6 @@ jobs:
key: macOS-latest-cmake-arm64
evict-old-files: 1d

- name: Dependencies
id: depends
continue-on-error: true
run: |
brew update
brew install curl

- name: Build
id: cmake_build
run: |
@@ -52,6 +45,8 @@ jobs:
-DCMAKE_INSTALL_RPATH='@loader_path' \
-DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
-DLLAMA_FATAL_WARNINGS=ON \
-DLLAMA_CURL=OFF \
-DLLAMA_BUILD_BORINGSSL=ON \
-DGGML_METAL_USE_BF16=ON \
-DGGML_METAL_EMBED_LIBRARY=ON \
-DGGML_RPC=ON \
@@ -90,13 +85,6 @@ jobs:
key: macOS-latest-cmake-x64
evict-old-files: 1d

- name: Dependencies
id: depends
continue-on-error: true
run: |
brew update
brew install curl

- name: Build
id: cmake_build
run: |
@@ -107,6 +95,8 @@ jobs:
-DCMAKE_INSTALL_RPATH='@loader_path' \
-DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
-DLLAMA_FATAL_WARNINGS=ON \
-DLLAMA_CURL=OFF \
-DLLAMA_BUILD_BORINGSSL=ON \
-DGGML_METAL=OFF \
-DGGML_RPC=ON \
-DCMAKE_OSX_DEPLOYMENT_TARGET=13.3
@@ -159,7 +149,7 @@ jobs:
id: depends
run: |
sudo apt-get update
sudo apt-get install build-essential libcurl4-openssl-dev
sudo apt-get install build-essential libssl-dev

- name: Build
id: cmake_build
@@ -171,6 +161,8 @@ jobs:
-DGGML_NATIVE=OFF \
-DGGML_CPU_ALL_VARIANTS=ON \
-DLLAMA_FATAL_WARNINGS=ON \
-DLLAMA_CURL=OFF \
-DLLAMA_OPENSSL=ON \
${{ env.CMAKE_ARGS }}
cmake --build build --config Release -j $(nproc)

@@ -212,7 +204,7 @@ jobs:
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
sudo apt-get update -y
sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libcurl4-openssl-dev
sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libssl-dev

- name: Build
id: cmake_build
@@ -220,6 +212,8 @@ jobs:
cmake -B build \
-DCMAKE_INSTALL_RPATH='$ORIGIN' \
-DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
-DLLAMA_CURL=OFF \
-DLLAMA_OPENSSL=ON \
-DGGML_BACKEND_DL=ON \
-DGGML_NATIVE=OFF \
-DGGML_CPU_ALL_VARIANTS=ON \
@@ -269,34 +263,24 @@ jobs:
run: |
choco install ninja

- name: libCURL
id: get_libcurl
uses: ./.github/actions/windows-setup-curl
with:
architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }}

- name: Build
shell: cmd
env:
CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch == 'x64' && 'x64' || 'amd64_arm64' }}
cmake -S . -B build -G "Ninja Multi-Config" ^
-D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake ^
-DLLAMA_CURL=OFF ^
-DLLAMA_BUILD_BORINGSSL=ON ^
-DGGML_NATIVE=OFF ^
-DGGML_BACKEND_DL=ON ^
-DGGML_CPU_ALL_VARIANTS=${{ matrix.arch == 'x64' && 'ON' || 'OFF' }} ^
-DGGML_OPENMP=ON ^
-DCURL_LIBRARY="%CURL_PATH%/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="%CURL_PATH%/include" ^
${{ env.CMAKE_ARGS }}
cmake --build build --config Release

- name: Pack artifacts
id: pack_artifacts
env:
CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
run: |
Copy-Item $env:CURL_PATH\bin\libcurl-${{ matrix.arch }}.dll .\build\bin\Release\
Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.44.35112\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\
7z a -snl llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\*
@@ -744,12 +728,14 @@ jobs:
"${{ steps.cann-image.outputs.image }}" \
bash -lc '
set -e
yum install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs git gcc gcc-c++ make cmake libcurl-devel
yum install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs git gcc gcc-c++ make cmake openssl-devel
yum clean all && rm -rf /var/cache/yum
git config --global --add safe.directory "/workspace"
export LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/$(uname -m)-linux/devlib/:${LD_LIBRARY_PATH}
cmake -S . -B build \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DLLAMA_CURL=OFF \
-DLLAMA_OPENSSL=ON \
-DGGML_CANN=on \
-DSOC_TYPE=${SOC_TYPE}
cmake --build build -j $(nproc)
@@ -200,6 +200,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)*

- [AI Sublime Text plugin](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (MIT)
- [BonzAI App](https://apps.apple.com/us/app/bonzai-your-local-ai-agent/id6752847988) (proprietary)
- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT)
- [Dot](https://github.com/alexpinel/Dot) (GPL)
- [eva](https://github.com/ylsdamxssjxxdd/eva) (MIT)
SECURITY.md
@@ -1,12 +1,52 @@
# Security Policy

- [**Reporting a vulnerability**](#reporting-a-vulnerability)
- [**Requirements**](#requirements)
- [**Covered Topics**](#covered-topics)
- [**Using llama.cpp securely**](#using-llamacpp-securely)
  - [Untrusted models](#untrusted-models)
  - [Untrusted inputs](#untrusted-inputs)
  - [Data privacy](#data-privacy)
  - [Untrusted environments or networks](#untrusted-environments-or-networks)
  - [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a vulnerability**](#reporting-a-vulnerability)

## Reporting a vulnerability

If you have discovered a security vulnerability in this project that falls inside the [covered topics](#covered-topics), please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.

Please disclose it as a private [security advisory](https://github.com/ggml-org/llama.cpp/security/advisories/new).

This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.

> [!IMPORTANT]
> For collaborators: if you are interested in helping out with reviewing private security disclosures, please see: https://github.com/ggml-org/llama.cpp/discussions/18080

## Requirements

Before submitting your report, ensure you meet the following requirements:

- You have read this policy and fully understand it.
- AI is only permitted in an assistive capacity as stated in [AGENTS.md](AGENTS.md). We do not accept reports that are written exclusively by AI.
- Your report must include a working Proof-of-Concept in the form of a script and/or attached files.

Maintainers reserve the right to close the report if these requirements are not fulfilled.

## Covered Topics

Only vulnerabilities that fall within these parts of the project are considered valid. For problems falling outside of this list, please report them as issues.

- `src/**/*`
- `ggml/**/*`
- `gguf-py/**/*`
- `tools/server/*`, **excluding** the following topics:
  - Web UI
  - Features marked as experimental
  - Features not recommended for use in untrusted environments (e.g., router, MCP)
  - Bugs that can lead to Denial-of-Service attacks

Note that none of the topics under [Using llama.cpp securely](#using-llamacpp-securely) are considered vulnerabilities in LLaMA C++.

For vulnerabilities that fall within the `vendor` directory, please report them directly to the third-party project.

## Using llama.cpp securely

@@ -55,19 +95,3 @@ If you intend to run multiple models in parallel with shared memory, it is your
3. Model Sharing: In a multitenant model sharing design, tenants and users must understand the security risks of running code provided by others. Since there are no reliable methods to detect malicious models, sandboxing the model execution is the recommended approach to mitigate the risk.

4. Hardware Attacks: GPUs or TPUs can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other models or processes running on the same system at the same time.

## Reporting a vulnerability

Beware that none of the topics under [Using llama.cpp securely](#using-llamacpp-securely) are considered vulnerabilities of LLaMA C++.

<!-- normal version -->
However, if you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.

Please disclose it as a private [security advisory](https://github.com/ggml-org/llama.cpp/security/advisories/new).

Please note that using AI to identify vulnerabilities and generate reports is permitted. However, you must (1) explicitly disclose how AI was used and (2) conduct a thorough manual review before submitting the report.

A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.

> [!IMPORTANT]
> For collaborators: if you are interested in helping out with reviewing private security disclosures, please see: https://github.com/ggml-org/llama.cpp/discussions/18080
@@ -297,7 +297,8 @@ function gg_sum_test_scripts {
}

function gg_get_model {
local gguf_0="$MNT/models/qwen3/0.6B/ggml-model-f16.gguf"
#local gguf_0="$MNT/models/qwen3/0.6B/ggml-model-f16.gguf"
local gguf_0="$MNT/models/qwen3/0.6B/ggml-model-q4_0.gguf"
if [[ -s $gguf_0 ]]; then
echo -n "$gguf_0"
else
@@ -1295,7 +1295,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
[](common_params & params) {
params.kv_unified = true;
}
).set_env("LLAMA_ARG_KV_UNIFIED").set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_PERPLEXITY}));
).set_env("LLAMA_ARG_KV_UNIFIED").set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_BATCHED}));
add_opt(common_arg(
{"--context-shift"},
{"--no-context-shift"},
@@ -2877,10 +2877,18 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
params.n_threads_http = value;
}
).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
add_opt(common_arg(
{"--cache-prompt"},
{"--no-cache-prompt"},
string_format("whether to enable prompt caching (default: %s)", params.cache_prompt ? "enabled" : "disabled"),
[](common_params & params, bool value) {
params.cache_prompt = value;
}
).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_PROMPT"));
add_opt(common_arg(
{"--cache-reuse"}, "N",
string_format(
"min chunk size to attempt reusing from the cache via KV shifting (default: %d)\n"
"min chunk size to attempt reusing from the cache via KV shifting, requires prompt caching to be enabled (default: %d)\n"
"[(card)](https://ggml.ai/f0.png)", params.n_cache_reuse
),
[](common_params & params, int value) {
@@ -80,6 +80,7 @@ int32_t cpu_get_num_math();
//

enum llama_example {
LLAMA_EXAMPLE_BATCHED,
LLAMA_EXAMPLE_DEBUG,
LLAMA_EXAMPLE_COMMON,
LLAMA_EXAMPLE_SPECULATIVE,
@@ -475,6 +476,7 @@ struct common_params {
int32_t timeout_write = timeout_read; // http write timeout in seconds
int32_t n_threads_http = -1; // number of threads to process HTTP requests (TODO: support threadpool)
int32_t n_cache_reuse = 0; // min chunk size to reuse from the cache via KV shifting
bool cache_prompt = true; // whether to enable prompt caching
int32_t n_ctx_checkpoints = 8; // max number of context checkpoints per slot
int32_t cache_ram_mib = 8192; // -1 = no limit, 0 - disable, 1 = 1 MiB, etc.
@@ -4371,7 +4371,37 @@ class Qwen3NextModel(Qwen2MoeModel):
        elif name.endswith("norm.weight") and not name.endswith("linear_attn.norm.weight"):
            data_torch = data_torch + 1

        yield from super().modify_tensors(data_torch, name, bid)
        if "in_proj_qkvz.weight" in name:
            # original order: [q, k, v, z] * head_count
            # corrected order: [q * head_count, k * head_count, v * head_count, z * head_count]
            head_k_dim = self.hparams["linear_key_head_dim"]
            head_v_dim = self.hparams["linear_value_head_dim"]
            num_v_heads = self.hparams["linear_num_value_heads"]
            num_k_heads = self.hparams["linear_num_key_heads"]
            hidden_size = self.hparams["hidden_size"]
            split_arg_list_qkvz = [
                head_k_dim,  # q partition
                head_k_dim,  # k partition
                (num_v_heads // num_k_heads * head_v_dim),  # v partition
                (num_v_heads // num_k_heads * head_v_dim),  # z partition
            ]
            # view as (n_embd, head_count, [q+k+v+z])
            data_torch = data_torch.permute(1, 0).contiguous()
            data_torch = data_torch.view(-1, num_k_heads, sum(split_arg_list_qkvz))
            # split into q, k, v, z
            q, k, v, z = torch.split(data_torch, split_arg_list_qkvz, dim=-1)
            # flatten dim + head_count
            q = q.contiguous().view(hidden_size, -1)
            k = k.contiguous().view(hidden_size, -1)
            v = v.contiguous().view(hidden_size, -1)
            z = z.contiguous().view(hidden_size, -1)
            # stack back
            qkv = torch.cat([q, k, v], dim=-1).permute(1, 0).contiguous()
            z = z.permute(1, 0).contiguous()
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_QKV, bid, ".weight"), qkv)
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_GATE, bid, ".weight"), z)
        else:
            yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register("RND1")
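As a cross-check of the `in_proj_qkvz` regrouping above: the fused projection stores `[q, k, v, z]` blocks interleaved per key head, and the converter regroups them into contiguous q, k, v and z sections before emitting `ATTN_QKV` and `ATTN_GATE`. The following is a small, self-contained sketch of that reordering with invented toy dimensions; none of the sizes below come from a real checkpoint, only the reshape/split/concat logic mirrors the diff.

# Minimal sketch of the per-head [q, k, v, z] regrouping done in modify_tensors above.
# All dimensions are toy values chosen for illustration.
import torch

head_k_dim, head_v_dim = 4, 6
num_k_heads, num_v_heads = 2, 4
hidden_size = 8

splits = [
    head_k_dim,                               # q partition
    head_k_dim,                               # k partition
    num_v_heads // num_k_heads * head_v_dim,  # v partition
    num_v_heads // num_k_heads * head_v_dim,  # z partition
]

# fused projection weight: rows are [q, k, v, z] blocks repeated per key head
in_proj_qkvz = torch.randn(num_k_heads * sum(splits), hidden_size)

# same steps as the converter: transpose, view per head, split, flatten, re-stack
w = in_proj_qkvz.permute(1, 0).contiguous().view(-1, num_k_heads, sum(splits))
q, k, v, z = torch.split(w, splits, dim=-1)
qkv = torch.cat([x.contiguous().view(hidden_size, -1) for x in (q, k, v)], dim=-1)
qkv = qkv.permute(1, 0).contiguous()                                   # -> ATTN_QKV weight
z = z.contiguous().view(hidden_size, -1).permute(1, 0).contiguous()    # -> ATTN_GATE weight

print(qkv.shape, z.shape)  # torch.Size([28, 8]) torch.Size([12, 8]) for these toy sizes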
@@ -21,7 +21,7 @@ int main(int argc, char ** argv) {
params.prompt = "Hello my name is";
params.n_predict = 32;

if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, print_usage)) {
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BATCHED, print_usage)) {
return 1;
}
@@ -57,11 +57,21 @@ struct callback_data {
}
};

static bool has_pooling(llama_context * ctx) {
switch (llama_pooling_type(ctx)) {
case LLAMA_POOLING_TYPE_NONE:
case LLAMA_POOLING_TYPE_UNSPECIFIED:
return false;
default:
return true;
}
}

struct output_data {
float * data_ptr = nullptr;
int data_size = 0;
std::string type_suffix;
std::vector<float> storage;
std::vector<float> embd_norm;
std::string prompt;
std::vector<llama_token> tokens;

@@ -73,24 +83,32 @@ struct output_data {
prompt = params.prompt;

if (params.embedding) {
const int n_embd = llama_model_n_embd_out(model);
const bool pooling_enabled = llama_pooling_type(ctx) != LLAMA_POOLING_TYPE_NONE;
const int n_embd_count = pooling_enabled ? 1 : tokens.size();
const int n_embeddings = n_embd * n_embd_count;
const int n_embd = llama_model_n_embd_out(model);
const bool pooling = has_pooling(ctx);
const int n_embd_count = pooling ? 1 : tokens.size();
const int n_floats = n_embd * n_embd_count;

float * embeddings;
if (pooling_enabled) {
embeddings = llama_get_embeddings_seq(ctx, 0);
storage.resize(n_embeddings);
common_embd_normalize(embeddings, storage.data(), n_embeddings, params.embd_normalize);
embeddings = storage.data();
} else {
embeddings = llama_get_embeddings(ctx);
float * embd_raw = pooling ? llama_get_embeddings_seq(ctx, 0) : llama_get_embeddings(ctx);
if (embd_raw == nullptr) {
throw std::runtime_error("failed to get embeddings from the model");
}

data_ptr = embeddings;
data_size = n_embeddings;
LOG_DBG("pooling_enabled: %s\n", pooling ? "true" : "false");
LOG_DBG("n_embd: %d\n", n_embd);
LOG_DBG("n_floats: %d\n", n_floats);
LOG_DBG("n_embd_count: %d\n", n_embd_count);

data_ptr = embd_raw;
data_size = n_floats;
type_suffix = "-embeddings";

if (params.embd_normalize >= 0) {
embd_norm.resize(n_floats);
for (int i = 0; i < n_embd_count; i++) {
common_embd_normalize(embd_raw+i*n_embd, embd_norm.data()+i*n_embd, n_embd, params.embd_normalize);
}
data_ptr = embd_norm.data();
}
} else {
const float * logits = llama_get_logits_ith(ctx, tokens.size() - 1);
const int n_logits = llama_vocab_n_tokens(vocab);
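A note on the normalization loop above: when pooling is disabled there are n_embd_count embeddings of length n_embd, and common_embd_normalize is applied to each row separately rather than to the flattened buffer. A rough, hypothetical illustration of that per-row idea follows, assuming a plain Euclidean norm; the real embd_normalize parameter selects among several normalization modes, so this is not the llama.cpp implementation.

# Per-row embedding normalization sketch (assumed L2 norm, toy data only).
import numpy as np

def normalize_rows(embd: np.ndarray, eps: float = 1e-12) -> np.ndarray:
    # embd has shape (n_embd_count, n_embd); each row is one token/sequence embedding
    norms = np.linalg.norm(embd, axis=-1, keepdims=True)
    return embd / np.maximum(norms, eps)

raw = np.random.rand(4, 8).astype(np.float32)   # 4 embeddings of size 8 (invented values)
out = normalize_rows(raw)
print(np.linalg.norm(out, axis=-1))             # ~1.0 for every row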
@ -119,6 +119,8 @@ struct ggml_backend_vk_context;
|
|||
// Max number of adds that can be fused without exceeding MAX_PARAMETER_COUNT.
|
||||
#define MAX_FUSED_ADDS (MAX_PARAMETER_COUNT - 3)
|
||||
|
||||
typedef std::shared_ptr<struct vk_pipeline_struct> vk_pipeline;
|
||||
|
||||
struct vk_pipeline_struct {
|
||||
std::string name;
|
||||
vk::ShaderModule shader_module;
|
||||
|
|
@ -136,9 +138,15 @@ struct vk_pipeline_struct {
|
|||
std::atomic<bool> compiled {};
|
||||
// number of registers used, extracted from pipeline executable properties
|
||||
uint32_t register_count {};
|
||||
|
||||
#if defined(VK_EXT_shader_64bit_indexing)
|
||||
bool is_64b_indexing {};
|
||||
#endif
|
||||
// linked list of pipelines for multiple compilation variants.
|
||||
// currently only used to compile a 64-bit indexing variant.
|
||||
vk_pipeline next;
|
||||
};
|
||||
|
||||
typedef std::shared_ptr<vk_pipeline_struct> vk_pipeline;
|
||||
typedef std::weak_ptr<vk_pipeline_struct> vk_pipeline_ref;
|
||||
|
||||
static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);
|
||||
|
|
@ -230,9 +238,7 @@ static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
|
|||
/* .is_host = */ NULL,
|
||||
};
|
||||
|
||||
#ifdef GGML_VULKAN_MEMORY_DEBUG
|
||||
class vk_memory_logger;
|
||||
#endif
|
||||
class vk_perf_logger;
|
||||
static void ggml_vk_destroy_buffer(vk_buffer& buf);
|
||||
static void ggml_vk_synchronize(ggml_backend_vk_context * ctx);
|
||||
|
|
@ -584,6 +590,8 @@ struct vk_device_struct {
|
|||
bool add_rms_fusion;
|
||||
uint32_t partials_binding_alignment;
|
||||
|
||||
bool shader_64b_indexing;
|
||||
|
||||
bool integer_dot_product;
|
||||
// 0: default, 1: force mmvq, -1: disable mmvq
|
||||
int32_t mmvq_mode;
|
||||
|
|
@ -815,9 +823,7 @@ struct vk_device_struct {
|
|||
bool allow_sysmem_fallback;
|
||||
bool disable_graph_optimize;
|
||||
|
||||
#ifdef GGML_VULKAN_MEMORY_DEBUG
|
||||
std::unique_ptr<vk_memory_logger> memory_logger;
|
||||
#endif
|
||||
|
||||
~vk_device_struct() {
|
||||
VK_LOG_DEBUG("destroy device " << name);
|
||||
|
|
@ -1553,8 +1559,9 @@ static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx, vk_contex
|
|||
static void ggml_vk_load_shaders(vk_device& device);
|
||||
static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx);
|
||||
|
||||
#if defined(GGML_VULKAN_MEMORY_DEBUG) || defined(GGML_VULKAN_DEBUG)
|
||||
#define VK_LOG_MEMORY(msg) std::cerr << "ggml_vulkan memory: " << msg << std::endl
|
||||
static bool vk_memory_logger_enabled = false;
|
||||
|
||||
#define VK_LOG_MEMORY(msg) if (vk_memory_logger_enabled) { std::cerr << "ggml_vulkan memory: " << msg << std::endl; }
|
||||
|
||||
static std::string format_size(size_t size) {
|
||||
const size_t kib = 1024;
|
||||
|
|
@ -1587,10 +1594,10 @@ private:
|
|||
std::map<vk::Buffer, size_t> allocations; // Track allocations
|
||||
size_t total_device;
|
||||
size_t total_host;
|
||||
static std::mutex log_mutex;
|
||||
};
|
||||
#else
|
||||
#define VK_LOG_MEMORY(msg) ((void) 0)
|
||||
#endif // GGML_VULKAN_MEMORY_DEBUG
|
||||
|
||||
std::mutex vk_memory_logger::log_mutex;
|
||||
|
||||
static bool vk_perf_logger_enabled = false;
|
||||
static bool vk_perf_logger_concurrent = false;
|
||||
|
|
@ -1897,10 +1904,10 @@ struct ggml_backend_vk_buffer_context {
|
|||
}
|
||||
};
|
||||
|
||||
#ifdef GGML_VULKAN_MEMORY_DEBUG
|
||||
static std::mutex log_mutex;
|
||||
|
||||
void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
|
||||
if (!vk_memory_logger_enabled) {
|
||||
return;
|
||||
}
|
||||
std::lock_guard<std::mutex> guard(log_mutex);
|
||||
vk_buffer buf = buf_ref.lock();
|
||||
const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
|
||||
|
|
@ -1912,7 +1919,7 @@ void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
|
|||
}
|
||||
|
||||
void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
|
||||
if (buf_ref.expired() || buf_ref.lock()->size == 0) {
|
||||
if (buf_ref.expired() || buf_ref.lock()->size == 0 || !vk_memory_logger_enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -1930,7 +1937,6 @@ void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
|
|||
VK_LOG_MEMORY("ERROR " << buf->device->name << ": Attempted to deallocate unknown " << type << " memory at " << buf->buffer);
|
||||
}
|
||||
}
|
||||
#endif // GGML_VULKAN_MEMORY_DEBUG
|
||||
|
||||
struct vk_instance_t {
|
||||
vk::Instance instance;
|
||||
|
|
@ -2080,6 +2086,19 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin
|
|||
compute_pipeline_create_info.setPNext(&rci);
|
||||
}
|
||||
|
||||
#if defined(VK_EXT_shader_64bit_indexing)
|
||||
vk::PipelineCreateFlags2CreateInfo pipelineFlags2CreateInfo;
|
||||
if (pipeline->is_64b_indexing)
|
||||
{
|
||||
pipelineFlags2CreateInfo.flags = vk::PipelineCreateFlagBits2::e64BitIndexingEXT;
|
||||
if (device->pipeline_executable_properties_support) {
|
||||
pipelineFlags2CreateInfo.flags |= vk::PipelineCreateFlagBits2::eCaptureStatisticsKHR;
|
||||
}
|
||||
pipelineFlags2CreateInfo.setPNext(compute_pipeline_create_info.pNext);
|
||||
compute_pipeline_create_info.setPNext(&pipelineFlags2CreateInfo);
|
||||
}
|
||||
#endif
|
||||
|
||||
try {
|
||||
pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;
|
||||
} catch (const vk::SystemError& e) {
|
||||
|
|
@ -2570,9 +2589,7 @@ static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, const std
|
|||
buf->bda_addr = device->device.getBufferAddress(addressInfo);
|
||||
}
|
||||
|
||||
#ifdef GGML_VULKAN_MEMORY_DEBUG
|
||||
device->memory_logger->log_allocation(buf, size);
|
||||
#endif
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
|
@ -2629,11 +2646,9 @@ static void ggml_vk_destroy_buffer(vk_buffer& buf) {
|
|||
return;
|
||||
}
|
||||
|
||||
#ifdef GGML_VULKAN_MEMORY_DEBUG
|
||||
if (buf->device != nullptr) {
|
||||
buf->device->memory_logger->log_deallocation(buf);
|
||||
}
|
||||
#endif
|
||||
|
||||
buf.reset();
|
||||
}
|
||||
|
|
@ -3002,6 +3017,11 @@ static void ggml_vk_load_shaders(vk_device& device) {
|
|||
if ((device->architecture == AMD_GCN) && (device->driver_id != vk::DriverId::eAmdProprietary)) {
|
||||
m_warptile_mmq = m_warptile_mmq_int = { 256, 64, 64, 32, 16, 16, 2, 2, 2, 1, 16 };
|
||||
m_warptile_mmqid = m_warptile_mmqid_int = { 256, 64, 64, 32, 16, 16, 2, 2, 2, 1, 16 };
|
||||
} else if (device->vendor_id == VK_VENDOR_ID_AMD && device->coopmat_support && device->driver_id != vk::DriverId::eAmdProprietary) {
|
||||
// This is intentionally using tx_m values, slight performance increase
|
||||
l_warptile = { 256, 128, 128, 16, subgroup_size_8, 64, 2, tm_m, tn_m, tk_m, subgroup_size_8 };
|
||||
l_warptile_mmq = l_warptile_mmq_int = { 256, 128, 128, 32, subgroup_size_8, 64, 2, tm_m, tn_m, tk_m, subgroup_size_8 };
|
||||
l_warptile_mmq_int_k = { 256, 128, 128, 32, subgroup_size_16, 64, 1, 4, 2, 1, subgroup_size_16 };
|
||||
} else if (device->vendor_id == VK_VENDOR_ID_INTEL && device->coopmat_support && device->architecture == INTEL_XE2) {
|
||||
// Xe2/Xe3 with coopmat enabled - warptile performance tuning
|
||||
l_warptile = { 512, 128, 128, 16, subgroup_size_8, 32, 2, tm_m, tn_m, tk_m, subgroup_size_8 };
|
||||
|
|
@ -3061,7 +3081,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
|
|||
}
|
||||
|
||||
std::vector<std::future<void>> compiles;
|
||||
auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const char *name, size_t spv_size, const void* spv_data, const char *entrypoint,
|
||||
auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& base_pipeline, const char *name, size_t spv_size, const void* spv_data, const char *entrypoint,
|
||||
uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants,
|
||||
uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) {
|
||||
|
||||
|
|
@ -3069,35 +3089,49 @@ static void ggml_vk_load_shaders(vk_device& device) {
|
|||
required_subgroup_size = get_subgroup_size(name, device->architecture);
|
||||
}
|
||||
|
||||
if (!pipeline) {
|
||||
pipeline = std::make_shared<vk_pipeline_struct>();
|
||||
}
|
||||
if (!pipeline->initialized) {
|
||||
pipeline->name = name;
|
||||
pipeline->parameter_count = parameter_count;
|
||||
pipeline->push_constant_size = push_constant_size;
|
||||
pipeline->wg_denoms = wg_denoms;
|
||||
pipeline->align = align;
|
||||
pipeline->initialized = true;
|
||||
}
|
||||
vk_pipeline *ptr = &base_pipeline;
|
||||
|
||||
if (!pipeline->needed || pipeline->compiled) {
|
||||
return;
|
||||
int num_pipelines = 1;
|
||||
#if defined(VK_EXT_shader_64bit_indexing)
|
||||
if (device->shader_64b_indexing) {
|
||||
num_pipelines = 2;
|
||||
}
|
||||
// TODO: We're no longer benefitting from the async compiles (shaders are
|
||||
// compiled individually, as needed) and this complexity can be removed.
|
||||
{
|
||||
// wait until fewer than N compiles are in progress
|
||||
uint32_t N = std::max(1u, std::thread::hardware_concurrency());
|
||||
std::unique_lock<std::mutex> guard(compile_count_mutex);
|
||||
while (compile_count >= N) {
|
||||
compile_count_cond.wait(guard);
|
||||
#endif
|
||||
for (int i = 0; i < num_pipelines; ++i, ptr = &(*ptr)->next) {
|
||||
vk_pipeline &pipeline = *ptr;
|
||||
if (!pipeline) {
|
||||
pipeline = std::make_shared<vk_pipeline_struct>();
|
||||
}
|
||||
if (!pipeline->initialized) {
|
||||
pipeline->name = name;
|
||||
pipeline->parameter_count = parameter_count;
|
||||
pipeline->push_constant_size = push_constant_size;
|
||||
pipeline->wg_denoms = wg_denoms;
|
||||
pipeline->align = align;
|
||||
pipeline->initialized = true;
|
||||
#if defined(VK_EXT_shader_64bit_indexing)
|
||||
pipeline->is_64b_indexing = (i == 1);
|
||||
#endif
|
||||
}
|
||||
compile_count++;
|
||||
}
|
||||
|
||||
compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), spv_size, spv_data, entrypoint,
|
||||
parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size));
|
||||
if (!pipeline->needed || pipeline->compiled) {
|
||||
continue;
|
||||
}
|
||||
// TODO: We're no longer benefitting from the async compiles (shaders are
|
||||
// compiled individually, as needed) and this complexity can be removed.
|
||||
{
|
||||
// wait until fewer than N compiles are in progress
|
||||
uint32_t N = std::max(1u, std::thread::hardware_concurrency());
|
||||
std::unique_lock<std::mutex> guard(compile_count_mutex);
|
||||
while (compile_count >= N) {
|
||||
compile_count_cond.wait(guard);
|
||||
}
|
||||
compile_count++;
|
||||
}
|
||||
|
||||
compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), spv_size, spv_data, entrypoint,
|
||||
parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size));
|
||||
}
|
||||
};
|
||||
|
||||
auto const &ggml_vk_create_pipeline2 = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const char *entrypoint,
|
||||
|
|
@ -4435,9 +4469,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
|
|||
vk_device device = std::make_shared<vk_device_struct>();
|
||||
vk_instance.devices[idx] = device;
|
||||
|
||||
#ifdef GGML_VULKAN_MEMORY_DEBUG
|
||||
device->memory_logger = std::unique_ptr<vk_memory_logger>(new vk_memory_logger());
|
||||
#endif
|
||||
|
||||
size_t dev_num = vk_instance.device_indices[idx];
|
||||
|
||||
|
|
@ -4475,6 +4507,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
|
|||
bool pipeline_executable_properties_support = false;
|
||||
device->coopmat_support = false;
|
||||
device->integer_dot_product = false;
|
||||
device->shader_64b_indexing = false;
|
||||
bool bfloat16_support = false;
|
||||
|
||||
for (const auto& properties : ext_props) {
|
||||
|
|
@ -4522,6 +4555,10 @@ static vk_device ggml_vk_get_device(size_t idx) {
|
|||
device->memory_priority = true;
|
||||
} else if (strcmp("VK_EXT_external_memory_host", properties.extensionName) == 0) {
|
||||
device->external_memory_host = true;
|
||||
#if defined(VK_EXT_shader_64bit_indexing)
|
||||
} else if (strcmp("VK_EXT_shader_64bit_indexing", properties.extensionName) == 0) {
|
||||
device->shader_64b_indexing = true;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4812,6 +4849,16 @@ static vk_device ggml_vk_get_device(size_t idx) {
|
|||
device_extensions.push_back("VK_EXT_external_memory_host");
|
||||
}
|
||||
|
||||
#if defined(VK_EXT_shader_64bit_indexing)
|
||||
VkPhysicalDeviceShader64BitIndexingFeaturesEXT shader_64bit_indexing_features {};
|
||||
shader_64bit_indexing_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_64_BIT_INDEXING_FEATURES_EXT;
|
||||
if (device->shader_64b_indexing) {
|
||||
last_struct->pNext = (VkBaseOutStructure *)&shader_64bit_indexing_features;
|
||||
last_struct = (VkBaseOutStructure *)&shader_64bit_indexing_features;
|
||||
device_extensions.push_back("VK_EXT_shader_64bit_indexing");
|
||||
}
|
||||
#endif
|
||||
|
||||
vkGetPhysicalDeviceFeatures2(device->physical_device, &device_features2);
|
||||
|
||||
device->pipeline_executable_properties_support = pipeline_executable_properties_support;
|
||||
|
|
@ -5078,7 +5125,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
|
|||
switch (device->vendor_id) {
|
||||
#ifndef GGML_VULKAN_RUN_TESTS
|
||||
case VK_VENDOR_ID_AMD:
|
||||
device->mul_mat_l[i] = false;
|
||||
device->mul_mat_l[i] = device->coopmat_support && device->driver_id != vk::DriverId::eAmdProprietary;
|
||||
device->mul_mat_m[i] = true;
|
||||
device->mul_mat_s[i] = true;
|
||||
device->mul_mat_id_l[i] = false;
|
||||
|
|
@ -5419,6 +5466,7 @@ static void ggml_vk_instance_init() {
|
|||
vk_perf_logger_enabled = getenv("GGML_VK_PERF_LOGGER") != nullptr;
|
||||
vk_perf_logger_concurrent = getenv("GGML_VK_PERF_LOGGER_CONCURRENT") != nullptr;
|
||||
vk_enable_sync_logger = getenv("GGML_VK_SYNC_LOGGER") != nullptr;
|
||||
vk_memory_logger_enabled = getenv("GGML_VK_MEMORY_LOGGER") != nullptr;
|
||||
const char* GGML_VK_PERF_LOGGER_FREQUENCY = getenv("GGML_VK_PERF_LOGGER_FREQUENCY");
|
||||
|
||||
if (GGML_VK_PERF_LOGGER_FREQUENCY != nullptr) {
|
||||
|
|
@ -6897,6 +6945,20 @@ static void ggml_vk_quantize_q8_1(ggml_backend_vk_context * ctx, vk_context& sub
|
|||
ggml_vk_sync_buffers(ctx, subctx);
|
||||
}
|
||||
|
||||
static vk_pipeline ggml_vk_get_64b_indexing_pipeline(ggml_backend_vk_context * ctx, vk_pipeline &pipeline) {
|
||||
GGML_UNUSED(ctx);
|
||||
#if defined(VK_EXT_shader_64bit_indexing)
|
||||
vk_pipeline *ptr = &pipeline;
|
||||
while (*ptr) {
|
||||
if ((*ptr)->is_64b_indexing) {
|
||||
return *ptr;
|
||||
}
|
||||
ptr = &(*ptr)->next;
|
||||
}
|
||||
#endif
|
||||
return pipeline;
|
||||
}
|
||||
|
||||
static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool disable_split_k) {
|
||||
VK_LOG_DEBUG("ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << ggml_type_name(src0->type) << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
|
||||
std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << ggml_type_name(src1->type) << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
|
||||
|
|
@ -6980,6 +7042,10 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub
|
|||
|
||||
vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned, qx_needs_dequant ? f16_type : src0->type, quantize_y ? GGML_TYPE_Q8_1 : (y_f32_kernel ? GGML_TYPE_F32 : src1->type));
|
||||
|
||||
if (ggml_nbytes(src0) > ctx->device->properties.limits.maxStorageBufferRange) {
|
||||
pipeline = ggml_vk_get_64b_indexing_pipeline(ctx, pipeline);
|
||||
}
|
||||
|
||||
// Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking
|
||||
uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) : ne11;
|
||||
const uint64_t x_ne = ggml_nelements(src0);
|
||||
|
|
@ -7289,6 +7355,10 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
|
|||
to_q8_1 = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1);
|
||||
}
|
||||
|
||||
if (ggml_nbytes(src0) > ctx->device->properties.limits.maxStorageBufferRange) {
|
||||
dmmv = ggml_vk_get_64b_indexing_pipeline(ctx, dmmv);
|
||||
}
|
||||
|
||||
const bool qx_needs_dequant = x_non_contig;
|
||||
const bool qy_needs_dequant = !quantize_y && ((src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig);
|
||||
|
||||
|
|
@ -7484,9 +7554,15 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c
|
|||
gqa_ratio = 1;
|
||||
}
|
||||
|
||||
vk_pipeline pipeline = ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1];
|
||||
|
||||
if (ggml_nbytes(src0) > ctx->device->properties.limits.maxStorageBufferRange) {
|
||||
pipeline = ggml_vk_get_64b_indexing_pipeline(ctx, pipeline);
|
||||
}
|
||||
|
||||
{
|
||||
// Request descriptor sets
|
||||
ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], 1);
|
||||
ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
|
||||
}
|
||||
|
||||
vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops], true);
|
||||
|
|
@ -7528,7 +7604,7 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c
|
|||
workgroups_z /= gqa_ratio;
|
||||
}
|
||||
|
||||
ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1],
|
||||
ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
|
||||
{
|
||||
d_Qx,
|
||||
d_Qy,
|
||||
|
|
@ -7578,9 +7654,14 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con
|
|||
const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);
|
||||
const uint32_t channel_stride_y = nb12 / sizeof(float);
|
||||
|
||||
vk_pipeline pipeline = ctx->device->pipeline_mul_mat_vec_nc_f16_f32;
|
||||
if (ggml_nbytes(src0) > ctx->device->properties.limits.maxStorageBufferRange) {
|
||||
pipeline = ggml_vk_get_64b_indexing_pipeline(ctx, pipeline);
|
||||
}
|
||||
|
||||
{
|
||||
// Request descriptor sets
|
||||
ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);
|
||||
ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
|
||||
}
|
||||
|
||||
vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops], true);
|
||||
|
|
@ -7617,7 +7698,7 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con
|
|||
|
||||
init_pushconst_tensor_offsets(ctx, pc, src0, src1, nullptr, nullptr, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
|
||||
|
||||
ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32,
|
||||
ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
|
||||
{
|
||||
d_Qx,
|
||||
d_Qy,
|
||||
|
|
@ -7636,8 +7717,9 @@ static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, c
|
|||
// Handle huge A matrix by splitting the M dimensions. This works well for convolution use cases
|
||||
// where the M dimension is very large.
|
||||
// Split_k doesn't work with M splitting.
|
||||
// This only supports batchsize == 1.
|
||||
const size_t nbytes = ggml_nbytes(src0);
|
||||
const bool needs_split = nbytes > ctx->device->properties.limits.maxStorageBufferRange;
|
||||
const bool needs_split = dst->ne[2] == 1 && dst->ne[3] == 1 && nbytes > ctx->device->properties.limits.maxStorageBufferRange;
|
||||
if (needs_split) {
|
||||
// Choose the number of rows that can fit (and divide by two, to allow for any additional offsets)
|
||||
const uint32_t M_split = ctx->device->properties.limits.maxStorageBufferRange / (2 * src0->nb[1]);
|
||||
|
|
@ -7779,6 +7861,9 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context&
|
|||
|
||||
vk_pipeline pipeline = ggml_vk_guess_matmul_id_pipeline(ctx, mmp, ne01, nei1, aligned, qx_needs_dequant ? f16_type : src0->type);
|
||||
|
||||
if (ggml_nbytes(src0) > ctx->device->properties.limits.maxStorageBufferRange) {
|
||||
pipeline = ggml_vk_get_64b_indexing_pipeline(ctx, pipeline);
|
||||
}
|
||||
// Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking
|
||||
uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) :ne11;
|
||||
const uint64_t x_ne = ggml_nelements(src0);
|
||||
|
|
@ -8040,6 +8125,10 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte
|
|||
const bool qx_needs_dequant = x_non_contig;
|
||||
const bool qy_needs_dequant = !quantize_y && ((src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig);
|
||||
|
||||
if (ggml_nbytes(src0) > ctx->device->properties.limits.maxStorageBufferRange) {
|
||||
dmmv = ggml_vk_get_64b_indexing_pipeline(ctx, dmmv);
|
||||
}
|
||||
|
||||
// Not implemented
|
||||
GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
|
||||
GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
|
||||
|
|
|
|||
|
|
@ -87,7 +87,6 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
|
|||
const uint tid = gl_LocalInvocationID.x;
|
||||
|
||||
get_offsets(a_offset, b_offset, d_offset);
|
||||
a_offset /= QUANT_K;
|
||||
|
||||
y_offset = QUANT_R == 1 ? 1 : QUANT_K/2;
|
||||
|
||||
|
|
|
|||
|
|
@ -65,9 +65,9 @@ void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) {
|
|||
|
||||
a_offset =
|
||||
#ifdef MUL_MAT_ID
|
||||
expert_id * p.batch_stride_a;
|
||||
expert_id * (p.batch_stride_a / QUANT_K);
|
||||
#else
|
||||
batch_idx_a * p.batch_stride_a;
|
||||
batch_idx_a * (p.batch_stride_a / QUANT_K);
|
||||
#endif
|
||||
b_offset =
|
||||
#ifdef MUL_MAT_ID
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32,
|
|||
const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
|
||||
// Compute starting index in matrix B for this superblock
|
||||
const uint y_idx = i * QUANT_K + 32 * ib32;
|
||||
uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
uint ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
|
||||
// Precompute indices for quantization lookup tables
|
||||
const uint qh_base = 2 * ib32;
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32,
|
|||
const vec4 b_val_1 = vec4(data_b_v4[base_b_idx + 2 * l + 1]);
|
||||
|
||||
// index for data_a
|
||||
uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
uint ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const float d = float(data_a[ibi].d);
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
|
|||
const uint nibble_shift = 4 * (itid & 1);
|
||||
const uint ib32 = itid / 2; // 0..7
|
||||
|
||||
uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
uint ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const float d = float(data_a[ibi].d);
|
||||
const uint scale = (data_a[ibi].scales[ib32] >> nibble_shift) & 0xF;
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
|
|||
const uint y_idx = i * QUANT_K + 16 * itid;
|
||||
const uint nibble_shift = 4 * (itid & 1);
|
||||
const uint ib32 = itid / 2; // 0..7
|
||||
uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
uint ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
// Precompute db multiplication factors
|
||||
float db_vals[NUM_ROWS];
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
|
|
@ -22,7 +22,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
|
|||
db_vals[n] = d * (0.125f + float(scale) * 0.25f);
|
||||
ibi += num_blocks_per_row;
|
||||
}
|
||||
ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
// Preload grid and sign data for all l values
|
||||
vec4 grid0_vals[2], grid1_vals[2];
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
|
|||
const uint y_idx = i * QUANT_K + 16 * itid;
|
||||
const uint ib32 = itid / 2; // 0..7
|
||||
|
||||
uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
uint ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const float d = float(data_a[ibi].d);
|
||||
const uint signscale = pack32(u16vec2(
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
|
|||
void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
|
||||
const uint y_idx = i * QUANT_K + 32 * ib32;
|
||||
|
||||
uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
uint ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const float d = float(data_a[ibi].d);
|
||||
const uint scale = (data_a[ibi].scales[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
|
|||
const uint y_idx = i * QUANT_K + 16 * itid;
|
||||
const uint ib32 = itid / 2; // 0..7
|
||||
|
||||
uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
|
||||
uint ibi = a_offset + first_row * num_blocks_per_row + i;
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const float d = float(data_a[ibi].d);
|
||||
const uint signscale = pack32(u16vec2(
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
|
|||
const uint y_idx = i * QUANT_K + y_offset;
|
||||
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
|
||||
const uint ib0 = a_offset + (first_row+n)*num_blocks_per_row;
|
||||
csel ^= 1;
|
||||
|
||||
if (!all_threads) { // when we don't have enough blocks to use all threads
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint ix, co
|
|||
const uint y_idx = i * QUANT_K + y_offset;
|
||||
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
|
||||
const uint ib0 = a_offset + (first_row+n)*num_blocks_per_row;
|
||||
csel ^= 1;
|
||||
|
||||
if (!all_threads) { // when we don't have enough blocks to use all threads
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint v_im,
|
|||
const uint y2_idx = y1_idx + 128;
|
||||
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
|
||||
const uint ib0 = a_offset + (first_row+n)*num_blocks_per_row;
|
||||
const FLOAT_TYPE_VEC2 dm = FLOAT_TYPE_VEC2(data_a[ib0 + i].dm);
|
||||
|
||||
const uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ];
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint v_im,
|
|||
const uint y2_idx = y1_idx + 128;
|
||||
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
|
||||
const uint ib0 = a_offset + (first_row+n)*num_blocks_per_row;
|
||||
const FLOAT_TYPE_VEC2 dm = FLOAT_TYPE_VEC2(data_a[ib0 + i].dm);
|
||||
|
||||
const uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ];
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
|
|||
const uint y_idx = i * QUANT_K + y_offset;
|
||||
|
||||
[[unroll]] for (uint n = 0; n < num_rows; ++n) {
|
||||
const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
|
||||
const uint ib0 = a_offset + (first_row+n)*num_blocks_per_row;
|
||||
csel ^= 1;
|
||||
|
||||
if (!all_threads) { // when we don't have enough blocks to use all threads
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
|
|||
const uint tid = gl_LocalInvocationID.x;
|
||||
|
||||
get_offsets(a_offset, b_offset, d_offset);
|
||||
a_offset /= QUANT_K_Q8_1;
|
||||
a_offset *= QUANT_K / QUANT_K_Q8_1;
|
||||
b_offset /= QUANT_K_Q8_1;
|
||||
|
||||
FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
|
||||
|
|
|
|||
|
|
@ -234,13 +234,13 @@ void main() {
|
|||
const uint end_k = min(p.K, (ik + 1) * p.k_split);
|
||||
#endif
|
||||
|
||||
uint pos_a = (
|
||||
uint pos_a =
|
||||
#ifdef MUL_MAT_ID
|
||||
expert_idx * p.batch_stride_a +
|
||||
expert_idx * (p.batch_stride_a / LOAD_VEC_A) +
|
||||
#else
|
||||
batch_idx_a * p.batch_stride_a +
|
||||
batch_idx_a * (p.batch_stride_a / LOAD_VEC_A) +
|
||||
#endif
|
||||
ir * BM * p.stride_a + start_k) / LOAD_VEC_A;
|
||||
(ir * BM * p.stride_a + start_k) / LOAD_VEC_A;
|
||||
#ifdef MUL_MAT_ID
|
||||
uint pos_b = 0;
|
||||
#else
|
||||
|
|
|
|||
|
|
@ -250,10 +250,10 @@ void main() {
|
|||
#endif
|
||||
|
||||
#ifdef MUL_MAT_ID
|
||||
uint pos_a = (expert_idx * p.batch_stride_a) / QUANT_K;
|
||||
uint pos_a = expert_idx * (p.batch_stride_a / QUANT_K);
|
||||
uint pos_b = 0;
|
||||
#else
|
||||
uint pos_a = (batch_idx_a * p.batch_stride_a) / QUANT_K;
|
||||
uint pos_a = batch_idx_a * (p.batch_stride_a / QUANT_K);
|
||||
uint pos_b = batch_idx * p.batch_stride_b;
|
||||
uint pos_d = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z;
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -189,13 +189,13 @@ void main() {
|
|||
const uint end_k = min(p.K, (ik + 1) * p.k_split);
|
||||
#endif
|
||||
|
||||
uint pos_a_ib = (
|
||||
uint pos_a_ib =
|
||||
#ifdef MUL_MAT_ID
|
||||
expert_idx * p.batch_stride_a +
|
||||
expert_idx * (p.batch_stride_a / BK) +
|
||||
#else
|
||||
batch_idx_a * p.batch_stride_a +
|
||||
batch_idx_a * (p.batch_stride_a / BK) +
|
||||
#endif
|
||||
ir * BM * p.stride_a + start_k) / BK;
|
||||
(ir * BM * p.stride_a + start_k) / BK;
|
||||
#ifdef MUL_MAT_ID
|
||||
uint pos_b_ib = 0;
|
||||
#else
|
||||
|
|
|
|||
|
|
@@ -1763,6 +1763,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
        MODEL_TENSOR.ATTN_OUT,
        MODEL_TENSOR.ATTN_POST_NORM,
        MODEL_TENSOR.ATTN_GATE,
        MODEL_TENSOR.ATTN_QKV,
        MODEL_TENSOR.FFN_GATE_INP,
        MODEL_TENSOR.FFN_GATE_INP_SHEXP,
        MODEL_TENSOR.FFN_UP_SHEXP,
@@ -16,8 +16,8 @@ vendor = {
    # "https://github.com/mackron/miniaudio/raw/refs/tags/0.11.23/miniaudio.h": "vendor/miniaudio/miniaudio.h",
    "https://github.com/mackron/miniaudio/raw/669ed3e844524fcd883231b13095baee9f6de304/miniaudio.h": "vendor/miniaudio/miniaudio.h",

    "https://raw.githubusercontent.com/yhirose/cpp-httplib/refs/tags/v0.30.0/httplib.h": "vendor/cpp-httplib/httplib.h",
    "https://raw.githubusercontent.com/yhirose/cpp-httplib/refs/tags/v0.30.0/LICENSE": "vendor/cpp-httplib/LICENSE",
    "https://raw.githubusercontent.com/yhirose/cpp-httplib/refs/tags/v0.30.1/httplib.h": "vendor/cpp-httplib/httplib.h",
    "https://raw.githubusercontent.com/yhirose/cpp-httplib/refs/tags/v0.30.1/LICENSE": "vendor/cpp-httplib/LICENSE",

    "https://raw.githubusercontent.com/sheredom/subprocess.h/b49c56e9fe214488493021017bf3954b91c7c1f5/subprocess.h": "vendor/sheredom/subprocess.h",
}
@@ -964,6 +964,8 @@ static std::set<llm_tensor> llm_get_tensor_names(llm_arch arch) {
LLM_TENSOR_ATTN_K_NORM,
LLM_TENSOR_ATTN_V,
LLM_TENSOR_ATTN_OUT,
LLM_TENSOR_ATTN_QKV,
LLM_TENSOR_ATTN_GATE,
LLM_TENSOR_FFN_NORM,
LLM_TENSOR_FFN_GATE_INP,
LLM_TENSOR_FFN_GATE_EXPS,
@@ -6942,7 +6942,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
} else {
// Linear attention (gated delta net) specific tensors
// Create tensors with calculated dimensions
layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), { n_embd, qkvz_dim }, 0);
// note: ssm_in is used by legacy GGUF
layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), { n_embd, qkvz_dim }, TENSOR_NOT_REQUIRED);
layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, key_dim * 2 + value_dim }, TENSOR_NOT_REQUIRED);
layer.wqkv_gate = create_tensor(tn(LLM_TENSOR_ATTN_GATE, "weight", i), { n_embd, value_dim }, TENSOR_NOT_REQUIRED);
layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), { hparams.ssm_d_conv, conv_dim }, 0);
layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), { hparams.ssm_dt_rank }, 0);
layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A_NOSCAN, i), { hparams.ssm_dt_rank }, 0);
@@ -493,7 +493,8 @@ private:
ggml_tensor * cur,
int il);

ggml_tensor * build_delta_net_chunking(
// returns pair of output and new state
std::pair<ggml_tensor *, ggml_tensor *> build_delta_net_chunking(
ggml_tensor * q,
ggml_tensor * k,
ggml_tensor * v,
@@ -505,7 +506,8 @@ private:
ggml_tensor * diag_mask,
int il);

ggml_tensor * build_delta_net_autoregressive(
// returns pair of output and new state
std::pair<ggml_tensor *, ggml_tensor *> build_delta_net_autoregressive(
ggml_tensor * q,
ggml_tensor * k,
ggml_tensor * v,
@@ -520,6 +522,11 @@ private:
ggml_tensor * gate,
int layer);

// returns pair of qkv, z
std::pair<ggml_tensor *, ggml_tensor *> build_qkvz(
ggml_tensor * input,
int il);

const llama_model & model;
};
@ -86,7 +86,15 @@ llm_build_qwen3next::llm_build_qwen3next(const llama_model & model, const llm_gr
|
|||
ggml_build_forward_expand(gf, cur);
|
||||
}
|
||||
|
||||
ggml_tensor * llm_build_qwen3next::build_delta_net_chunking(
|
||||
// utility to get one slice from the third dimension
|
||||
// input dim: [x, y, c, b]
|
||||
// output dim: [x, y, 1, b]
|
||||
static ggml_tensor * get_slice_2d(ggml_context * ctx0, ggml_tensor * t, int64_t c) {
|
||||
return ggml_view_4d(ctx0, t, t->ne[0], t->ne[1], 1, t->ne[3],
|
||||
t->nb[1], t->nb[2], t->nb[3], t->nb[2] * c);
|
||||
}
|
||||
|
||||
std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen3next::build_delta_net_chunking(
|
||||
ggml_tensor * q,
|
||||
ggml_tensor * k,
|
||||
ggml_tensor * v,
|
||||
|
|
@ -187,18 +195,16 @@ ggml_tensor * llm_build_qwen3next::build_delta_net_chunking(
beta = ggml_reshape_4d(ctx0, beta, 1, chunk_size, n_chunks, H_k * n_seqs);

ggml_tensor * g_cumsum = ggml_cumsum(ctx0, g);
cb(g_cumsum, "g_cumsum", il); // shape: (chunk_size, 1, n_chunks, H_v * n_seqs)

cb(g_cumsum, "g_cumsum", il);

ggml_tensor * gcs_i = ggml_reshape_4d(ctx0, g_cumsum, chunk_size, 1, n_chunks, H_v * n_seqs);
ggml_tensor * gcs_i = g_cumsum; // ggml_reshape_4d(ctx0, g_cumsum, chunk_size, 1, n_chunks, H_v * n_seqs);
ggml_tensor * gcs_j = ggml_reshape_4d(ctx0, g_cumsum, 1, chunk_size, n_chunks, H_v * n_seqs);

ggml_tensor * gcs_j_broadcast =
ggml_repeat_4d(ctx0, gcs_j, chunk_size, chunk_size, n_chunks, H_v * n_seqs);

ggml_tensor * decay_mask = ggml_sub(ctx0, gcs_j_broadcast, gcs_i);

cb(decay_mask, "decay_mask", il);
cb(decay_mask, "decay_mask", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)

decay_mask = ggml_mul(ctx0, decay_mask, diag_mask);
decay_mask = ggml_exp(ctx0, decay_mask);
@ -208,8 +214,7 @@ ggml_tensor * llm_build_qwen3next::build_delta_net_chunking(

ggml_tensor * k_decay = ggml_mul(ctx0, kmulkbeta, decay_mask);
ggml_tensor * attn = ggml_neg(ctx0, ggml_mul(ctx0, k_decay, causal_mask));

cb(attn, "attn_pre_solve", il);
cb(attn, "attn_pre_solve", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)

ggml_tensor * attn_lower = ggml_mul(ctx0, attn, causal_mask);
ggml_tensor * lhs = ggml_sub(ctx0, ggml_repeat(ctx0, identity, attn_lower), attn_lower);
@ -217,8 +222,7 @@ ggml_tensor * llm_build_qwen3next::build_delta_net_chunking(
ggml_tensor * lin_solve = ggml_solve_tri(ctx0, lhs, attn, true, true, false);
attn = ggml_mul(ctx0, lin_solve, causal_mask);
attn = ggml_add(ctx0, attn, identity);

cb(attn, "attn_solved", il);
cb(attn, "attn_solved", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)

v = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, v_beta)), attn);
@ -226,116 +230,126 @@ ggml_tensor * llm_build_qwen3next::build_delta_net_chunking(
ggml_tensor * gexp = ggml_exp(ctx0, g_cumsum_t);

ggml_tensor * kbeta_gexp = ggml_mul(ctx0, k_beta, gexp);

cb(kbeta_gexp, "kbeta_gexp", il);
cb(kbeta_gexp, "kbeta_gexp", il); // shape: (S_k, chunk_size, n_chunks, H_v * n_seqs)

ggml_tensor * k_cumdecay =
ggml_cont(ctx0, ggml_transpose(ctx0, ggml_mul_mat(ctx0, attn, ggml_cont(ctx0, ggml_transpose(ctx0, kbeta_gexp)))));
cb(k_cumdecay, "k_cumdecay", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)

cb(k_cumdecay, "k_cumdecay", il);
ggml_tensor * attn_kq = ggml_mul_mat(ctx0, k, q);
attn_kq = ggml_mul(ctx0, attn_kq, decay_mask);
attn_kq = ggml_mul(ctx0, attn_kq, diag_mask);
cb(attn_kq, "attn_kq", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)

// vectorized calculation of key_gdiff
// improved from the chunked version:
// g_last = torch.clamp(g_cum[:, :, -1], max=50.0).exp().unsqueeze(-1).unsqueeze(-1)
// g_diff = torch.clamp(g_cum[:, :, -1:] - g_cum, max=50.0).exp()
// key_gdiff = key * g_diff.unsqueeze(-1)
// kgdmulvnew = (key_gdiff).transpose(-1, -2) @ v_new
// last_recurrent_state = last_recurrent_state * g_last + kgdmulvnew

// get last element in g_cumsum along chunk_size dimension (ne0)
// example: [[x, y, z, ..., last], ...] -> [[last], ...]
ggml_tensor * g_last = ggml_view_4d(ctx0, g_cumsum, 1, 1, g_cumsum->ne[2], g_cumsum->ne[3],
g_cumsum->nb[1], g_cumsum->nb[2], g_cumsum->nb[3],
(g_cumsum->ne[0] - 1) * ggml_element_size(g_cumsum));
g_last = ggml_cont(ctx0, g_last);
cb(g_last, "g_last", il); // shape: (1, 1, n_chunks, H_v * n_seqs)

ggml_tensor * g_last_exp = ggml_exp(ctx0, g_last);
cb(g_last_exp, "g_last_exp", il); // shape: (1, 1, n_chunks, H_v * n_seqs)

ggml_tensor * g_diff = ggml_neg(ctx0, ggml_sub(ctx0, g_cumsum, g_last));
cb(g_diff, "g_diff", il); // shape: (chunk_size, 1, n_chunks, H_v * n_seqs)

ggml_tensor * g_diff_exp = ggml_exp(ctx0, g_diff);
ggml_tensor * key_gdiff = ggml_mul(ctx0, k, g_diff_exp);
cb(key_gdiff, "key_gdiff", il); // shape: (S_k, chunk_size, n_chunks, H_v * n_seqs)

// state to be updated per chunk
ggml_tensor * new_state = state; // ggml_dup(ctx0, state);
cb(new_state, "new_state", il); // shape: (S_v, S_v, H_v, n_seqs)

// shape after loop of chunks: (S_v, chunk_size, n_chunks, H_v * n_seqs)
ggml_tensor * core_attn_out = nullptr;
ggml_tensor * new_state = ggml_dup(ctx0, state);

cb(new_state, "new_state", il);

for (int64_t chunk = 0; chunk < n_chunks; chunk++) {
auto chunkify = [=](ggml_tensor * t) {
return ggml_cont(ctx0, ggml_view_4d(ctx0, t, t->ne[0], chunk_size, 1, t->ne[3],
t->nb[1], t->nb[2], t->nb[3], t->nb[2] * chunk));
};
// shape: (S_k, chunk_size, 1, H_k * n_seqs)
ggml_tensor * q_chunk = get_slice_2d(ctx0, q, chunk); // (no cont), next op: ggml_mul

auto chunkify_g = [=](ggml_tensor * t) {
return ggml_cont(ctx0, ggml_view_4d(ctx0, t, chunk_size, t->ne[1], 1, t->ne[3],
t->nb[1], t->nb[2], t->nb[3], t->nb[2] * chunk));
};
// shape: (S_v, chunk_size, 1, H_v * n_seqs)
ggml_tensor * v_chunk = get_slice_2d(ctx0, v, chunk); // (no cont), next op: ggml_repeat

ggml_tensor * k_chunk = chunkify(k);
ggml_tensor * q_chunk = chunkify(q);
ggml_tensor * v_chunk = chunkify(v);
// shape: (chunk_size, 1, n_chunks, H_v * n_seqs)
ggml_tensor * gexp_chunk = get_slice_2d(ctx0, gexp, chunk); // (no cont), next op: ggml_mul

ggml_tensor * g_cs_chunk = chunkify_g(g_cumsum);
ggml_tensor * g_cs_chunk_t = ggml_cont(ctx0, ggml_transpose(ctx0, g_cs_chunk));

ggml_tensor * decay_mask_chunk = chunkify(decay_mask);
ggml_tensor * k_cumdecay_chunk = chunkify(k_cumdecay);

ggml_tensor * gexp_chunk = ggml_exp(ctx0, g_cs_chunk_t);
// shape: (chunk_size, 1, H_v * n_seqs)
ggml_tensor * k_cumdecay_chunk = get_slice_2d(ctx0, k_cumdecay, chunk); // (no cont), next op: ggml_mul_mat

// attn = (q_i @ k_i.transpose(-1, -2) * decay_mask[:, :, i]).masked_fill_(mask, 0)
attn = ggml_mul_mat(ctx0, k_chunk, q_chunk);
attn = ggml_mul(ctx0, attn, decay_mask_chunk);
attn = ggml_mul(ctx0, attn, diag_mask);
// replaced by precomputed attn_kq
ggml_tensor * attn_chunk = get_slice_2d(ctx0, attn_kq, chunk);
cb(attn_chunk, "attn_chunk", il);

ggml_tensor * state_t = ggml_cont_4d(ctx0, ggml_permute(ctx0, new_state, 1, 0, 2, 3), S_v, S_v, 1, H_v * n_seqs);

// v_prime = (k_cumdecay[:, :, i]) @ last_recurrent_state
ggml_tensor * v_prime = ggml_mul_mat(ctx0, state_t, k_cumdecay_chunk);
cb(v_prime, "v_prime_chunk", il); // shape: (S_v, 1, H_v * n_seqs)

// v_new = v_i - v_prime
ggml_tensor * v_new = ggml_sub(ctx0, ggml_repeat(ctx0, v_chunk, v_prime), v_prime);
ggml_tensor * v_new_t = ggml_cont(ctx0, ggml_transpose(ctx0, v_new));
cb(v_new, "v_new_chunk", il);

// attn_inter = (q_i * g[:, :, i, :, None].exp()) @ last_recurrent_state
ggml_tensor * q_g_exp = ggml_mul(ctx0, q_chunk, gexp_chunk);
ggml_tensor * attn_inter = ggml_mul_mat(ctx0, state_t, q_g_exp);
cb(attn_inter, "attn_inter_chunk", il);

// core_attn_out[:, :, i] = attn_inter + attn @ v_new
ggml_tensor * v_attn = ggml_mul_mat(ctx0, v_new_t, attn);
ggml_tensor * v_attn = ggml_mul_mat(ctx0, v_new_t, attn_chunk);
cb(v_attn, "v_attn_chunk", il);

ggml_tensor * core_attn_out_chunk = ggml_add(ctx0, attn_inter, v_attn);
cb(core_attn_out_chunk, "core_attn_out_chunk", il); // shape: (S_v, chunk_size, 1, H_v * n_seqs)

core_attn_out = core_attn_out == nullptr ? core_attn_out_chunk : ggml_concat(ctx0, core_attn_out, core_attn_out_chunk, 1);
core_attn_out = core_attn_out == nullptr
? core_attn_out_chunk
: ggml_concat(ctx0, core_attn_out, core_attn_out_chunk, 2);

// g_last = torch.clamp(g_cum[:, :, -1], max=50.0).exp().unsqueeze(-1).unsqueeze(-1)
// g_diff = torch.clamp(g_cum[:, :, -1:] - g_cum, max=50.0).exp()
// key_gdiff = key * g_diff.unsqueeze(-1)
// kgdmulvnew = (key_gdiff).transpose(-1, -2) @ v_new
ggml_tensor * k_gdiff = ggml_cont(ctx0, get_slice_2d(ctx0, key_gdiff, chunk));
//ggml_tensor * kgdmulvnew = ggml_mul_mat(ctx0, k_gdiff, v_new); // this is slower on metal, why?
ggml_tensor * kgdmulvnew = ggml_mul_mat(ctx0, v_new_t, ggml_cont(ctx0, ggml_transpose(ctx0, k_gdiff)));

// last_recurrent_state = last_recurrent_state * g_last + kgdmulvnew

ggml_tensor * g_cum_last =
ggml_cont(ctx0, ggml_view_4d(ctx0, g_cs_chunk_t, g_cs_chunk_t->ne[0], 1, g_cs_chunk_t->ne[2], g_cs_chunk_t->ne[3],
g_cs_chunk_t->nb[1], g_cs_chunk_t->nb[2], g_cs_chunk_t->nb[3],
g_cs_chunk_t->nb[0] * (g_cs_chunk_t->ne[1] - 1)));

ggml_tensor * gexp_last =
ggml_reshape_4d(ctx0, ggml_exp(ctx0, g_cum_last), 1, 1, g_cum_last->ne[0] * g_cum_last->ne[2], g_cum_last->ne[3]);

ggml_tensor * g_cum_last_3d =
ggml_reshape_3d(ctx0, g_cum_last, g_cum_last->ne[0], g_cum_last->ne[2], g_cum_last->ne[3]);

ggml_tensor * g_cumsum_3d = ggml_reshape_3d(ctx0, g_cs_chunk, g_cs_chunk->ne[0], g_cs_chunk->ne[2], g_cs_chunk->ne[3]);

ggml_tensor * g_diff = ggml_neg(ctx0, ggml_sub(ctx0, g_cumsum_3d, g_cum_last_3d));

ggml_tensor * g_diff_exp = ggml_exp(ctx0, g_diff);

ggml_tensor * key_gdiff = ggml_mul(ctx0, k_chunk,
ggml_reshape_4d(ctx0, g_diff_exp, 1, g_diff_exp->ne[0], g_diff_exp->ne[1],
g_diff_exp->ne[2] * g_diff_exp->ne[3]));

ggml_tensor * kgdmulvnew = ggml_mul_mat(ctx0, v_new_t, ggml_cont(ctx0, ggml_transpose(ctx0, key_gdiff)));

ggml_tensor * gexp_last_chunk = ggml_cont(ctx0, get_slice_2d(ctx0, g_last_exp, chunk));
new_state = ggml_add(ctx0,
ggml_mul(ctx0, new_state, ggml_reshape_4d(ctx0, gexp_last, gexp_last->ne[0], gexp_last->ne[1], H_v, n_seqs)),
ggml_mul(ctx0, new_state, ggml_reshape_4d(ctx0, gexp_last_chunk, gexp_last_chunk->ne[0], gexp_last_chunk->ne[1], H_v, n_seqs)),
ggml_reshape_4d(ctx0, kgdmulvnew, kgdmulvnew->ne[0], kgdmulvnew->ne[1], H_v, n_seqs));
}

core_attn_out = ggml_cont_4d(ctx0, core_attn_out, S_v, chunk_size * n_chunks, H_v, n_seqs);

ggml_tensor * output_tokens = ggml_view_4d(ctx0, core_attn_out, S_v, n_tokens, H_v, n_seqs, core_attn_out->nb[1], core_attn_out->nb[2], core_attn_out->nb[3], 0);
// truncate padded tokens
ggml_tensor * output_tokens = ggml_view_4d(ctx0, core_attn_out,
S_v, n_tokens, H_v, n_seqs,
ggml_row_size(core_attn_out->type, S_v),
ggml_row_size(core_attn_out->type, S_v * chunk_size * n_chunks),
ggml_row_size(core_attn_out->type, S_v * chunk_size * n_chunks * H_v), 0);
output_tokens = ggml_cont(ctx0, output_tokens);
cb(output_tokens, "output_tokens", il);

// flatten output
ggml_tensor * flat_output =
ggml_cont_1d(ctx0, ggml_permute(ctx0, output_tokens, 0, 2, 1, 3), S_v * H_v * n_tokens * n_seqs);
// permute back to (S_v, H_v, n_tokens, n_seqs)
output_tokens = ggml_permute(ctx0, output_tokens, 0, 2, 1, 3);
output_tokens = ggml_cont(ctx0, output_tokens);

ggml_tensor * flat_state = ggml_cont_1d(ctx0, new_state, S_v * S_v * H_v * n_seqs);

return ggml_concat(ctx0, flat_output, flat_state, 0);
return {output_tokens, new_state};
}

ggml_tensor * llm_build_qwen3next::build_delta_net_autoregressive(
std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen3next::build_delta_net_autoregressive(
ggml_tensor * q,
ggml_tensor * k,
ggml_tensor * v,
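For reference, the per-chunk state update implemented above can be summarized by restating the torch-style comments in the hunk (this is only a paraphrase of those comments, with S the recurrent state, g_cum the cumulative gate, and K, V_new the per-chunk key and corrected value):

\[
S \leftarrow S \cdot e^{g_{\mathrm{last}}} + \left(K \odot e^{\,g_{\mathrm{last}} - g_{\mathrm{cum}}}\right)^{\top} V_{\mathrm{new}}
\]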
@ -419,11 +433,7 @@ ggml_tensor * llm_build_qwen3next::build_delta_net_autoregressive(
cb(core_attn_out, "output_tokens", il);
cb(state, "new_state", il);

// flatten output, no need to permute since n_tokens is 1 so [S_v, 1, H_v, n_seqs] and [S_v, H_v, 1, n_seqs] are equivalent memory-layout wise
ggml_tensor * flat_output = ggml_reshape_1d(ctx0, core_attn_out, S_v * H_v * n_tokens * n_seqs);
ggml_tensor * flat_state = ggml_reshape_1d(ctx0, state, S_v * S_v * H_v * n_seqs);

return ggml_concat(ctx0, flat_output, flat_state, 0);
return {core_attn_out, state};
}

ggml_tensor * llm_build_qwen3next::build_norm_gated(
@ -523,6 +533,88 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn(
return cur;
}

std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen3next::build_qkvz(
ggml_tensor * input,
int il) {
const int64_t d_inner = hparams.ssm_d_inner;
const int64_t n_seqs = ubatch.n_seqs;
const int64_t head_k_dim = hparams.ssm_d_state;
const int64_t num_k_heads = hparams.ssm_n_group;
const int64_t num_v_heads = hparams.ssm_dt_rank;
const int64_t head_v_dim = d_inner / num_v_heads;
const int64_t n_seq_tokens = ubatch.n_seq_tokens;

if (model.layers[il].wqkv) {
// optimized path
ggml_tensor * qkv_mixed = build_lora_mm(model.layers[il].wqkv, input);
qkv_mixed = ggml_reshape_3d(ctx0, qkv_mixed, qkv_mixed->ne[0], n_seq_tokens, n_seqs);
cb(qkv_mixed, "linear_attn_qkv_mixed", il);

ggml_tensor * z = build_lora_mm(model.layers[il].wqkv_gate, input);
cb(z, "z", il);

return { qkv_mixed, z };

} else {
// legacy (slower) path
ggml_tensor * mixed_qkvz = build_lora_mm(model.layers[il].ssm_in, input);
cb(mixed_qkvz, "linear_attn_mixed_qkvz", il);

int64_t qkvz_new_dim = 2 * head_k_dim + 2 * head_v_dim * (num_v_heads / num_k_heads);
ggml_tensor * mixed_qkvz_reshaped = ggml_reshape_4d(ctx0, mixed_qkvz, qkvz_new_dim, num_k_heads, n_seq_tokens, n_seqs);

// Split mixed_qkvz into query, key, value, z
int64_t split_sizes_qkvz[4] = {
head_k_dim, // query size
head_k_dim, // key size
head_v_dim * num_v_heads / num_k_heads, // value size
head_v_dim * num_v_heads / num_k_heads // z size
};

ggml_tensor * query =
ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[0], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3], 0);
cb(query, "q", il);

ggml_tensor * key = ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[1], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3],
split_sizes_qkvz[0] * ggml_element_size(mixed_qkvz_reshaped));
cb(key, "k", il);

ggml_tensor * value =
ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[2], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3],
(split_sizes_qkvz[0] + split_sizes_qkvz[1]) * ggml_element_size(mixed_qkvz_reshaped));
cb(value, "v", il);

ggml_tensor * z = ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[3], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3],
(split_sizes_qkvz[0] + split_sizes_qkvz[1] + split_sizes_qkvz[2]) * ggml_element_size(mixed_qkvz_reshaped));
z = ggml_cont(ctx0, z);
cb(z, "z", il);

// After creating query, key, and value_reshaped, reshape each to flatten the head dimensions
// query: [head_k_dim, num_k_heads, n_tokens, n_seqs] -> [head_k_dim * num_k_heads, n_tokens, n_seqs]
ggml_tensor * query_flat = ggml_cont_3d(ctx0, query, head_k_dim * num_k_heads, n_seq_tokens, n_seqs);
cb(query_flat, "query_flat", il);

// key: [head_k_dim, num_k_heads, n_tokens, n_seqs] -> [head_k_dim * num_k_heads, n_tokens, n_seqs]
ggml_tensor * key_flat = ggml_cont_3d(ctx0, key, head_k_dim * num_k_heads, n_seq_tokens, n_seqs);
cb(key_flat, "key_flat", il);

// value_reshaped: [head_v_dim, num_v_heads, n_tokens, n_seqs] -> [head_v_dim * num_v_heads, n_tokens, n_seqs]
ggml_tensor * value_flat = ggml_cont_3d(ctx0, value, head_v_dim * num_v_heads, n_seq_tokens, n_seqs);
cb(value_flat, "value_flat", il);

// Now concatenate along the feature dimension (dim 0) to get [conv_dim, n_tokens, n_seqs]
ggml_tensor * qkv_mixed = ggml_concat(ctx0, query_flat, key_flat, 0);
qkv_mixed = ggml_concat(ctx0, qkv_mixed, value_flat, 0);
cb(qkv_mixed, "qkv_mixed", il);

return { qkv_mixed, z };
}
}

ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
llm_graph_input_rs * inp,
ggml_tensor * cur,
@ -547,15 +639,13 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);

// Input projections
ggml_tensor * mixed_qkvz = build_lora_mm(model.layers[il].ssm_in, cur);
cb(mixed_qkvz, "linear_attn_mixed_qkvz", il);
auto qkvz = build_qkvz(cur, il);
ggml_tensor * qkv_mixed = qkvz.first;
ggml_tensor * z = qkvz.second;

ggml_tensor * mixed_ba = build_lora_mm(model.layers[il].ssm_beta_alpha, cur);
cb(mixed_ba, "linear_attn_mixed_ba", il);

int64_t qkvz_new_dim = 2 * head_k_dim + 2 * head_v_dim * (num_v_heads / num_k_heads);
ggml_tensor * mixed_qkvz_reshaped = ggml_reshape_4d(ctx0, mixed_qkvz, qkvz_new_dim, num_k_heads, n_seq_tokens, n_seqs);

// Reshape mixed_ba: [batch, seq_len, hidden_size] -> [batch, seq_len, num_k_heads, 2*num_v_heads/num_k_heads]
int64_t ba_new_dim = 2 * num_v_heads / num_k_heads;
ggml_tensor * mixed_ba_reshaped = ggml_reshape_4d(ctx0, mixed_ba, ba_new_dim, num_k_heads, n_seq_tokens, n_seqs);
@ -575,8 +665,9 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
split_sizes_ba[0] * ggml_element_size(mixed_ba_reshaped));
cb(a, "a", il);

// Reshape b and a to merge head dimensions: [batch, seq_len, num_k_heads, num_v_heads/num_k_heads] -> [batch, seq_len, num_v_heads]
ggml_tensor * beta = ggml_cont_3d(ctx0, b, num_v_heads, n_seq_tokens, n_seqs);
ggml_tensor * beta = ggml_cont_4d(ctx0, b, num_v_heads, 1, n_seq_tokens, n_seqs);

// Reshape a to merge head dimensions: [batch, seq_len, num_k_heads, num_v_heads/num_k_heads] -> [batch, seq_len, num_v_heads]
ggml_tensor * alpha = ggml_cont_3d(ctx0, a, num_v_heads, n_seq_tokens, n_seqs);

ggml_tensor * alpha_biased = ggml_add(ctx0, alpha, model.layers[il].ssm_dt);
@ -585,48 +676,6 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
ggml_tensor * gate = ggml_mul(ctx0, alpha_softplus, model.layers[il].ssm_a); // -A_log.exp() * softplus
cb(gate, "gate", il);

// Split mixed_qkvz into query, key, value, z
int64_t split_sizes_qkvz[4] = {
head_k_dim, // query size
head_k_dim, // key size
head_v_dim * num_v_heads / num_k_heads, // value size
head_v_dim * num_v_heads / num_k_heads // z size
};

ggml_tensor * query =
ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[0], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3], 0);
cb(query, "q", il);

ggml_tensor * key = ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[1], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3],
split_sizes_qkvz[0] * sizeof(float));
cb(key, "k", il);

ggml_tensor * value =
ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[2], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3],
(split_sizes_qkvz[0] + split_sizes_qkvz[1]) * sizeof(float));
cb(value, "v", il);

ggml_tensor * z = ggml_view_4d(ctx0, mixed_qkvz_reshaped, split_sizes_qkvz[3], num_k_heads, n_seq_tokens, n_seqs,
mixed_qkvz_reshaped->nb[1], mixed_qkvz_reshaped->nb[2], mixed_qkvz_reshaped->nb[3],
(split_sizes_qkvz[0] + split_sizes_qkvz[1] + split_sizes_qkvz[2]) * sizeof(float));
cb(z, "z", il);

// After creating query, key, and value_reshaped, reshape each to flatten the head dimensions
// query: [head_k_dim, num_k_heads, n_tokens, n_seqs] -> [head_k_dim * num_k_heads, n_tokens, n_seqs]
ggml_tensor * query_flat = ggml_cont_3d(ctx0, query, head_k_dim * num_k_heads, n_seq_tokens, n_seqs);
cb(query_flat, "query_flat", il);

// key: [head_k_dim, num_k_heads, n_tokens, n_seqs] -> [head_k_dim * num_k_heads, n_tokens, n_seqs]
ggml_tensor * key_flat = ggml_cont_3d(ctx0, key, head_k_dim * num_k_heads, n_seq_tokens, n_seqs);
cb(key_flat, "key_flat", il);

// value_reshaped: [head_v_dim, num_v_heads, n_tokens, n_seqs] -> [head_v_dim * num_v_heads, n_tokens, n_seqs]
ggml_tensor * value_flat = ggml_cont_3d(ctx0, value, head_v_dim * num_v_heads, n_seq_tokens, n_seqs);
cb(value_flat, "value_flat", il);

// Get convolution states from cache
ggml_tensor * conv_states_all = mctx_cur->get_r_l(il);
ggml_tensor * ssm_states_all = mctx_cur->get_s_l(il);
@ -637,17 +686,6 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
ggml_tensor * conv_states = build_rs(inp, conv_states_all, hparams.n_embd_r(), n_seqs);
cb(conv_states, "conv_states", il);

// Now concatenate along the feature dimension (dim 0) to get [conv_dim, n_tokens, n_seqs]
ggml_tensor * qkv_mixed = ggml_concat(ctx0, query_flat, key_flat, 0);
qkv_mixed = ggml_concat(ctx0, qkv_mixed, value_flat, 0);
cb(qkv_mixed, "qkv_mixed", il);

qkv_mixed = ggml_permute(ctx0, qkv_mixed, 1, 0, 2, 3);
cb(qkv_mixed, "qkv_mixed_permuted", il);

// Calculate the total conv dimension
int64_t qkv_dim = head_k_dim * num_k_heads * 2 + head_v_dim * num_v_heads;

// Calculate convolution kernel size
ggml_tensor * conv_kernel = model.layers[il].ssm_conv1d;
const int64_t conv_kernel_size = conv_kernel->ne[0];
@ -655,6 +693,9 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
conv_states = ggml_reshape_3d(ctx0, conv_states, conv_kernel_size - 1, conv_channels, n_seqs);
cb(conv_states, "conv_states_reshaped", il);

qkv_mixed = ggml_permute(ctx0, qkv_mixed, 1, 0, 2, 3);
cb(qkv_mixed, "qkv_mixed_permuted", il);

ggml_tensor * conv_input = ggml_concat(ctx0, conv_states, qkv_mixed, 0);
cb(conv_input, "conv_input", il);
|
||||
|
|
@ -677,26 +718,25 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
|
|||
ggml_tensor * conv_output_proper = ggml_ssm_conv(ctx0, conv_input, conv_kernel);
cb(conv_output_proper, "conv_output_raw", il);

conv_output_proper = ggml_cont(ctx0, ggml_transpose(ctx0, conv_output_proper));
cb(conv_output_proper, "conv_output_pre_silu", il);

ggml_tensor * conv_output_silu = ggml_silu(ctx0, conv_output_proper);
cb(conv_output_silu, "conv_output_silu", il);

ggml_tensor * conv_qkv_mix =
ggml_cont_2d(ctx0, ggml_transpose(ctx0, conv_output_silu), qkv_dim, n_seq_tokens * n_seqs);
cb(conv_qkv_mix, "conv_qkv_mix", il);
ggml_tensor * conv_qkv_mix = conv_output_silu;

// Calculate the total conv dimension
int64_t qkv_dim = head_k_dim * num_k_heads * 2 + head_v_dim * num_v_heads;
int64_t nb1_qkv = ggml_row_size(conv_qkv_mix->type, qkv_dim);

// Extract the convolved Q, K, V from conv_output
ggml_tensor * q_conv =
ggml_view_2d(ctx0, conv_qkv_mix, head_k_dim * num_k_heads, n_seq_tokens * n_seqs, conv_qkv_mix->nb[1], 0);
ggml_view_2d(ctx0, conv_qkv_mix, head_k_dim * num_k_heads, n_seq_tokens * n_seqs, nb1_qkv, 0);
cb(q_conv, "q_conv", il);
ggml_tensor * k_conv =
ggml_view_2d(ctx0, conv_qkv_mix, head_k_dim * num_k_heads, n_seq_tokens * n_seqs, conv_qkv_mix->nb[1],
ggml_view_2d(ctx0, conv_qkv_mix, head_k_dim * num_k_heads, n_seq_tokens * n_seqs, nb1_qkv,
head_k_dim * num_k_heads * ggml_element_size(conv_qkv_mix));
cb(k_conv, "k_conv", il);
ggml_tensor * v_conv =
ggml_view_2d(ctx0, conv_qkv_mix, head_v_dim * num_v_heads, n_seq_tokens * n_seqs, conv_qkv_mix->nb[1],
ggml_view_2d(ctx0, conv_qkv_mix, head_v_dim * num_v_heads, n_seq_tokens * n_seqs, nb1_qkv,
2 * head_k_dim * num_k_heads * ggml_element_size(conv_qkv_mix));
cb(v_conv, "v_conv", il);
|
||||
|
|
@ -705,8 +745,6 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
|
|||
k_conv = ggml_cont_4d(ctx0, k_conv, head_k_dim, num_k_heads, n_seq_tokens, n_seqs);
v_conv = ggml_cont_4d(ctx0, v_conv, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);

beta = ggml_cont_4d(ctx0, b, num_v_heads, 1, n_seq_tokens, n_seqs);

ggml_tensor * state = build_rs(inp, ssm_states_all, hparams.n_embd_s(), n_seqs);
state = ggml_reshape_4d(ctx0, state, head_v_dim, head_v_dim * num_v_heads, 1, n_seqs);
cb(state, "state_predelta", il);
|
|
@ -738,45 +776,29 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
|
|||
cb(v_conv, "v_conv_predelta", il);

// Choose between build_delta_net_chunking, build_delta_net_recurrent, and build_delta_net_autoregressive based on n_tokens
ggml_tensor * attn_out;
std::pair<ggml_tensor *, ggml_tensor *> attn_out; // pair of (output, new_state)
if (n_seq_tokens == 1) {
attn_out = build_delta_net_autoregressive(q_conv, k_conv, v_conv, gate, beta, state, il);
} else {
attn_out = build_delta_net_chunking(q_conv, k_conv, v_conv, gate, beta, state, causal_mask, identity, diag_mask, il);
}
cb(attn_out, "attn_out", il);

// The tensors were concatenated 1d, so we need to extract them 1d as well
const int64_t output_flat_size = head_v_dim * num_v_heads * n_seq_tokens * n_seqs;
ggml_tensor * attn_out_1d = ggml_view_1d(ctx0, attn_out, output_flat_size, 0);
cb(attn_out_1d, "attn_out_1d", il);

ggml_tensor * attn_out_final = ggml_cont_4d(ctx0, attn_out_1d, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);
cb(attn_out_final, "attn_out_reshaped", il);

// Extract the state part (second part of the concatenated tensor)
// State starts after n_tokens elements along dimension 1
const int64_t state_flat_size = head_v_dim * head_v_dim * num_v_heads * n_seqs;

ggml_tensor * state_1d =
ggml_view_1d(ctx0, attn_out, state_flat_size, output_flat_size * ggml_element_size(attn_out));
cb(state_1d, "state_1d", il);
ggml_tensor * output = attn_out.first;
ggml_tensor * new_state = attn_out.second;
cb(output, "attn_output", il);
cb(new_state, "new_state", il);

// Update the recurrent states
ggml_build_forward_expand(gf,
ggml_cpy(ctx0, state_1d,
ggml_cpy(ctx0, new_state,
ggml_view_1d(ctx0, ssm_states_all, hparams.n_embd_s() * n_seqs,
kv_head * hparams.n_embd_s() * ggml_element_size(ssm_states_all))));

GGML_ASSERT(ggml_nelements(attn_out_1d) + ggml_nelements(state_1d) == ggml_nelements(attn_out));

// Reshape both attn_out_final and z to 2D tensors for normalization
// attn_out_final: [head_dim, n_heads, n_tokens, n_seqs] -> [n_heads * n_tokens * n_seqs, head_dim]
ggml_tensor * attn_out_2d_final =
ggml_cont_2d(ctx0, attn_out_final, head_v_dim, num_v_heads * n_seq_tokens * n_seqs);
ggml_tensor * attn_out_2d_final = ggml_reshape_2d(ctx0, output, head_v_dim, num_v_heads * n_seq_tokens * n_seqs);

// z: [head_dim, n_heads, n_tokens, n_seqs] -> [n_heads * n_tokens * n_seqs, head_dim]
ggml_tensor * z_2d = ggml_cont_2d(ctx0, z, head_v_dim, num_v_heads * n_seq_tokens * n_seqs);
ggml_tensor * z_2d = ggml_reshape_2d(ctx0, z, head_v_dim, num_v_heads * n_seq_tokens * n_seqs);

// Apply gated normalization: self.norm(core_attn_out, z)
ggml_tensor * attn_out_norm = build_norm_gated(attn_out_2d_final, model.layers[il].ssm_norm, z_2d, il);
|
|
@ -828,12 +850,6 @@ ggml_tensor * llm_build_qwen3next::build_layer_ffn(ggml_tensor * cur, const int
|
|||
shared_gate = ggml_sigmoid(ctx0, shared_gate);
cb(shared_gate, "shared_expert_gate_sigmoid", il);

// The gate needs to be broadcast to match the dimensions of ffn_shexp
// ffn_shexp is [n_embd, n_tokens, 1, 1] and shared_gate is [1, n_tokens, 1, 1]
// We need to repeat the gate along the feature dimension
shared_gate = ggml_repeat(ctx0, shared_gate, ffn_shexp);
cb(shared_gate, "shared_expert_gate_broadcast", il);

// Apply the gate to the shared expert output
ffn_shexp = ggml_mul(ctx0, ffn_shexp, shared_gate);
cb(ffn_shexp, "ffn_shexp_gated", il);
|
|
|
|||
|
|
@ -223,15 +223,6 @@ llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
llama_build_and_test(test-autorelease.cpp LABEL "model")
llama_build_and_test(test-backend-sampler.cpp LABEL "model")

llama_test(test-backend-sampler NAME test-backend-sampler-greedy ARGS --test greedy)
llama_test(test-backend-sampler NAME test-backend-sampler-temp ARGS --test temp)
llama_test(test-backend-sampler NAME test-backend-sampler-top_k ARGS --test top_k)
llama_test(test-backend-sampler NAME test-backend-sampler-dist ARGS --test dist)
llama_test(test-backend-sampler NAME test-backend-sampler-dist-and-cpu ARGS --test dist_and_cpu)
llama_test(test-backend-sampler NAME test-backend-sampler-logit-bias ARGS --test logit_bias)
llama_test(test-backend-sampler NAME test-backend-sampler-mul_seq ARGS --test multi_sequence)
llama_test(test-backend-sampler NAME test-backend-sampler-set-sampler ARGS --test set_sampler)

# Test for state restore with fragmented KV cache
# Requires a model, uses same args pattern as test-thread-safety
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
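Assuming the usual CMake/CTest workflow for this repository, the individual registrations shown above (before their removal in this hunk) could be invoked by name, for example:

    ctest --test-dir build -R test-backend-sampler-greedy --output-on-failure

This is a usage sketch only; the -R pattern matches the NAME passed to llama_test.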
|
|
|
|||
|
|
@ -7560,6 +7560,11 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F16, 1700000, 96, 2592, {1, 1}, {1, 1}));
test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F16, 1700000, 3, 2592, {1, 1}, {1, 1}));
test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F16, 1700000, 1, 2592, {1, 1}, {1, 1}));

test_cases.emplace_back(new test_mul_mat_id(GGML_TYPE_Q8_0, GGML_TYPE_F32, 128, 128, false, 8192, 2, 5120)); // Llama-4-Maverick-17B-128E-PAB-Q8_0
test_cases.emplace_back(new test_mul_mat_id(GGML_TYPE_Q8_0, GGML_TYPE_F32, 128, 128, false, 8192, 1, 5120)); // Llama-4-Maverick-17B-128E-PAB-Q8_0
test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q8_0, GGML_TYPE_F32, 8192, 1, 5120, {128, 1}, {1, 1}));
test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q8_0, GGML_TYPE_F32, 8192, 512, 5120, {128, 1}, {1, 1}));
#endif

for (ggml_type type_a : all_types) {
|
|
|
|||
|
|
@ -11,76 +11,78 @@
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <fstream>
#include <map>
#include <string>
#include <unordered_map>
#include <vector>

struct backend_cli_args {
const char * model = nullptr;
const char * test = nullptr;
const char * device = "cpu";
struct test_args {
std::string model;
std::string test;
std::string device = "auto";
};

struct test_model_context {
llama_model_ptr model;
struct test_params {
llama_model_ptr model;
};

static llama_model_ptr load_model(const test_args & args) {
auto mparams = llama_model_default_params();

ggml_backend_dev_t devs[2] = { nullptr, nullptr };

if (args.device != "auto") {
if (args.device == "gpu") {
devs[0] = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU);

if (devs[0] == nullptr) {
fprintf(stderr, "Error: GPU requested but not available\n");
return nullptr;
}

mparams.n_gpu_layers = 999;
} else if (args.device == "cpu") {
devs[0] = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);

mparams.n_gpu_layers = 0;
} else {
fprintf(stderr, "Error: invalid device '%s'\n", args.device.c_str());
return nullptr;
}

mparams.devices = devs;

fprintf(stderr, "Using device: %s\n", ggml_backend_dev_name(devs[0]));
}

llama_model_ptr res;

res.reset(llama_model_load_from_file(args.model.c_str(), mparams));

if (!res) {
fprintf(stderr, "Warning: failed to load model '%s', skipping test\n", args.model.c_str());
return nullptr;
}

return res;
}

struct test_context {
llama_context_ptr ctx;
int n_vocab = 0;

int n_vocab = 0;

const llama_vocab * vocab = nullptr;

std::unordered_map<llama_seq_id, int32_t> seq_positions;
std::unordered_map<llama_seq_id, int32_t> last_batch_info;

bool load_model(const backend_cli_args & args) {
if (model) {
return true;
}
test_context(const test_params & params, std::vector<llama_sampler_seq_config> & configs, int32_t n_seq_max = -1) {
auto * model = params.model.get();

llama_backend_init();

auto mparams = llama_model_default_params();

ggml_backend_dev_t devs[2];
if (std::string_view(args.device) == "gpu") {
ggml_backend_dev_t gpu = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU);
if (gpu == nullptr) {
fprintf(stderr, "Error: GPU requested but not available\n");
return false;
}
devs[0] = gpu;
devs[1] = nullptr; // null terminator
mparams.devices = devs;
mparams.n_gpu_layers = 999;
} else if (std::string_view(args.device) == "cpu") {
ggml_backend_dev_t cpu = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
devs[0] = cpu;
devs[1] = nullptr; // null terminator
mparams.devices = devs;
}

fprintf(stderr, "Using device: %s\n", ggml_backend_dev_name(devs[0]));

model.reset(llama_model_load_from_file(args.model, mparams));

if (!model) {
fprintf(stderr, "Warning: failed to load model '%s', skipping test\n", args.model);
return false;
}
n_vocab = llama_vocab_n_tokens(get_vocab());
fprintf(stderr, "Vocabulary size: %d\n", n_vocab);

return true;
}

bool setup(const backend_cli_args & args, std::vector<llama_sampler_seq_config> & configs, int32_t n_seq_max = -1) {
if (!model) {
load_model(args);
}

if (ctx) {
return true;
}
GGML_ASSERT(model);
GGML_ASSERT(!ctx);

llama_context_params cparams = llama_context_default_params();
cparams.n_ctx = 512;
|
|
@ -99,26 +101,23 @@ struct test_model_context {
cparams.n_seq_max = n_seq_max;
}

ctx.reset(llama_init_from_model(model.get(), cparams));
ctx.reset(llama_init_from_model(model, cparams));
if (!ctx) {
fprintf(stderr, "Warning: failed to create context, skipping test\n");
return false;
throw std::runtime_error("failed to create context");
}

llama_set_warmup(ctx.get(), false);

return true;
vocab = llama_model_get_vocab(model);
n_vocab = llama_vocab_n_tokens(vocab);
}

bool decode(const std::map<llama_seq_id, std::string> & prompts) {
if (!ctx) {
fprintf(stderr, "Error: context not initialized, call setup() first\n");
return false;
}
GGML_ASSERT(ctx);

last_batch_info.clear();
llama_batch batch = llama_batch_init(512, 0, prompts.size());

auto vocab = get_vocab();
for (const auto & [seq_id, prompt] : prompts) {
std::vector<llama_token> tokens;
tokens.push_back(llama_vocab_bos(vocab));
|
|
@ -199,10 +198,7 @@ struct test_model_context {
|
|||
}

bool decode_token(llama_token token, llama_seq_id seq_id = 0) {
if (ctx == nullptr) {
fprintf(stderr, "Error: context not initialized, call setup() first\n");
return false;
}
GGML_ASSERT(ctx);

llama_batch batch = llama_batch_init(1, 0, 1);
int32_t pos = seq_positions[seq_id];
|
|
@ -218,14 +214,12 @@ struct test_model_context {
|
|||
|
||||
seq_positions[seq_id]++;
llama_batch_free(batch);

return true;
}

bool decode_tokens(const std::map<llama_seq_id, llama_token> & seq_tokens) {
if (ctx == nullptr) {
fprintf(stderr, "Error: context not initialized, call setup() first\n");
return false;
}
GGML_ASSERT(ctx);

llama_batch batch = llama_batch_init(seq_tokens.size(), 0, seq_tokens.size());
|
||||
|
|
@ -247,40 +241,27 @@ struct test_model_context {
|
|||
update_batch_info(batch);

llama_batch_free(batch);

return true;
}

std::string token_to_piece(llama_token token, bool special) {
std::string token_to_piece(llama_token token, bool special) const {
std::string piece;
piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\n'
const int n_chars = llama_token_to_piece(get_vocab(), token, &piece[0], piece.size(), 0, special);
const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
if (n_chars < 0) {
piece.resize(-n_chars);
int check = llama_token_to_piece(get_vocab(), token, &piece[0], piece.size(), 0, special);
int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
GGML_ASSERT(check == -n_chars);
}
else {
} else {
piece.resize(n_chars);
}

return piece;
}

void reset() {
ctx.reset();
seq_positions.clear();
last_batch_info.clear();
}

const llama_vocab * get_vocab() const {
return model ? llama_model_get_vocab(model.get()) : nullptr;
}

};

static void test_backend_greedy_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_greedy_sampling(const test_params & params) {
const int seq_id = 0;

struct llama_sampler_chain_params backend_sampler_params = llama_sampler_chain_default_params();
|
|
@ -289,9 +270,7 @@ static void test_backend_greedy_sampling(const backend_cli_args & args) {
|
|||
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_greedy());
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Some"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -317,9 +296,7 @@ static void test_backend_greedy_sampling(const backend_cli_args & args) {
|
|||
}
}

static void test_backend_top_k_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_top_k_sampling(const test_params & params) {
const int seq_id = 0;
const int32_t k = 8;
struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
|
|
@ -327,9 +304,7 @@ static void test_backend_top_k_sampling(const backend_cli_args & args) {
|
|||
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_top_k(k));
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Hello"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -358,16 +333,12 @@ static void test_backend_top_k_sampling(const backend_cli_args & args) {
|
|||
|
||||
llama_sampler_chain_add(chain.get(), llama_sampler_init_dist(18));
llama_token token = llama_sampler_sample(chain.get(), test_ctx.ctx.get(), batch_idx);
const std::string token_str = test_ctx.token_to_piece(token, false);
GGML_ASSERT(token >= 0 && token < test_ctx.n_vocab);

printf("backend top-k hybrid sampling test PASSED\n");
}

static void test_backend_temp_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_temp_sampling(const test_params & params) {
{
const float temp_0 = 0.8f;
struct llama_sampler_chain_params backend_chain_params_0 = llama_sampler_chain_default_params();
|
|
@ -384,9 +355,7 @@ static void test_backend_temp_sampling(const backend_cli_args & args) {
|
|||
{ 1, backend_sampler_chain_1.get() }
};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{0, "Some where over the"}, {1, "Once upon a"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -430,8 +399,6 @@ static void test_backend_temp_sampling(const backend_cli_args & args) {
|
|||
auto test_argmax_temp = [&](float temp) {
printf("\nTesting temperature = %.1f\n", temp);

test_ctx.reset();

int seq_id = 0;
struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
llama_sampler_ptr backend_sampler_chain(llama_sampler_chain_init(backend_chain_params));
|
|
@ -441,9 +408,7 @@ static void test_backend_temp_sampling(const backend_cli_args & args) {
|
|||
{ seq_id, backend_sampler_chain.get() },
};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Once"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -459,12 +424,9 @@ static void test_backend_temp_sampling(const backend_cli_args & args) {
|
|||
test_argmax_temp(-1.0f);

printf("backend temp sampling test PASSED\n");

}

static void test_backend_temp_ext_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_temp_ext_sampling(const test_params & params) {
{
int seq_id = 0;
const float temp = 0.8f;
|
|
@ -478,9 +440,7 @@ static void test_backend_temp_ext_sampling(const backend_cli_args & args) {
|
|||
{ seq_id, backend_sampler_chain.get() },
};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Once upon a"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -494,14 +454,10 @@ static void test_backend_temp_ext_sampling(const backend_cli_args & args) {
|
|||
}
}

test_ctx.reset();

// lambda to testing non-positive temp/delta/exponent values.
auto test_argmax_temp = [&](float temp, float delta, float exponent) {
printf("\nTesting temperature = %.1f, delta = %1.f, exponent = %1.f\n", temp, delta, exponent);

test_ctx.reset();

int seq_id = 0;
struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
llama_sampler_ptr backend_sampler_chain(llama_sampler_chain_init(backend_chain_params));
|
|
@ -511,9 +467,7 @@ static void test_backend_temp_ext_sampling(const backend_cli_args & args) {
|
|||
{ seq_id, backend_sampler_chain.get() },
};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Once"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -535,12 +489,9 @@ static void test_backend_temp_ext_sampling(const backend_cli_args & args) {
|
|||
test_argmax_temp(0.8f, 0.0f, 2.0f); // Temperature scaling

printf("backend temp_ext sampling test PASSED\n");

}

static void test_backend_min_p_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_min_p_sampling(const test_params & params) {
const int seq_id = 0;
const float p = 0.1;
struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
|
|
@ -548,9 +499,7 @@ static void test_backend_min_p_sampling(const backend_cli_args & args) {
|
|||
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_min_p(p, 0));
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Hello"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -594,9 +543,7 @@ static void test_backend_min_p_sampling(const backend_cli_args & args) {
|
|||
printf("min-p sampling test PASSED\n");
}

static void test_backend_top_p_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_top_p_sampling(const test_params & params) {
const int seq_id = 0;
const float p = 0.9;
struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
|
|
@ -604,9 +551,7 @@ static void test_backend_top_p_sampling(const backend_cli_args & args) {
|
|||
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_top_p(p, 0));
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Hello"}})) {
return;
|
|
@ -648,9 +593,7 @@ static void test_backend_top_p_sampling(const backend_cli_args & args) {
|
|||
printf("top-p sampling test PASSED\n");
}

static void test_backend_multi_sequence_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_multi_sequence_sampling(const test_params & params) {
struct llama_sampler_chain_params chain_params_0 = llama_sampler_chain_default_params();
llama_sampler_ptr sampler_chain_0(llama_sampler_chain_init(chain_params_0));
llama_sampler_chain_add(sampler_chain_0.get(), llama_sampler_init_greedy());
|
|
@ -665,9 +608,7 @@ static void test_backend_multi_sequence_sampling(const backend_cli_args & args)
|
|||
{ 1, sampler_chain_1.get() }
};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

std::map<llama_seq_id, std::string> prompts = {
{0, "Hello"},
|
|
@ -718,19 +659,16 @@ static void test_backend_multi_sequence_sampling(const backend_cli_args & args)
|
|||
printf("backend multi-sequence sampling test PASSED\n");
}

static void test_backend_dist_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_dist_sampling(const test_params & params) {
const int seq_id = 189;
const int32_t seed = 88;

struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
llama_sampler_ptr backend_sampler_chain(llama_sampler_chain_init(backend_chain_params));
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_dist(seed));
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Some"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -749,19 +687,16 @@ static void test_backend_dist_sampling(const backend_cli_args & args) {
|
|||
printf("backend dist sampling test PASSED\n");
}

static void test_backend_dist_sampling_and_cpu(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_dist_sampling_and_cpu(const test_params & params) {
const int seq_id = 0;
const int32_t seed = 88;

struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
llama_sampler_ptr backend_sampler_chain(llama_sampler_chain_init(backend_chain_params));
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_dist(seed));
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Some"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -782,31 +717,31 @@ static void test_backend_dist_sampling_and_cpu(const backend_cli_args & args) {
|
|||
printf("backend dist & cpu sampling test PASSED\n");
}

static void test_backend_logit_bias_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

// Calling load_model to ensure vocab is loaded and can be accessed
if (!test_ctx.load_model(args)) {
return;
}
static void test_backend_logit_bias_sampling(const test_params & params) {
const auto * model = params.model.get();
const auto * vocab = llama_model_get_vocab(model);

const int seq_id = 0;

// Create the logit biases vector.
std::vector<llama_logit_bias> logit_bias;

// Get the token for the piece "World".
const std::string piece = "World";
std::vector<llama_token> tokens(16);
llama_tokenize(test_ctx.get_vocab(), piece.c_str(), piece.size(), tokens.data(), tokens.size(), false, false);
llama_tokenize(vocab, piece.c_str(), piece.size(), tokens.data(), tokens.size(), false, false);

llama_token bias_token = tokens[0];
logit_bias.push_back({ bias_token, +100.0f });
// TODO: biasing too much here makes the Vulkan sampling fail - should be investigated further
// https://github.com/ggml-org/llama.cpp/actions/runs/20894267644/job/60030252675?pr=18753#step:3:23350
//logit_bias.push_back({ bias_token, +100.0f });
logit_bias.push_back({ bias_token, +10.0f });

printf("biasing token piece '%s' -> token id %d\n", piece.c_str(), bias_token);

struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
llama_sampler_ptr backend_sampler_chain(llama_sampler_chain_init(backend_chain_params));
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_logit_bias(
llama_vocab_n_tokens(test_ctx.get_vocab()),
llama_vocab_n_tokens(vocab),
logit_bias.size(),
logit_bias.data()));
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_dist(88));
|
|
@ -815,17 +750,14 @@ static void test_backend_logit_bias_sampling(const backend_cli_args & args) {
|
|||
{ seq_id, backend_sampler_chain.get() },
};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Hello"}})) {
GGML_ASSERT(false && "Failed to decode token");
}

llama_token backend_token = llama_get_sampled_token_ith(test_ctx.ctx.get(), test_ctx.idx_for_seq(seq_id));
const std::string backend_token_str = test_ctx.token_to_piece(backend_token, false);
printf("logit bias sampled token = %d, string='%s'\n", backend_token, backend_token_str.c_str());
printf("sampled token = %d, expected = %d\n", backend_token, bias_token);
GGML_ASSERT(backend_token == bias_token);

printf("backend logit bias sampling test PASSED\n");
|
|
@ -833,9 +765,7 @@ static void test_backend_logit_bias_sampling(const backend_cli_args & args) {
|
|||
|
||||
// This test verifies that it is possible to have two different backend sampler,
// one that uses the backend dist sampler, and another that uses CPU dist sampler.
static void test_backend_mixed_sampling(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_mixed_sampling(const test_params & params) {
struct llama_sampler_chain_params chain_params_0 = llama_sampler_chain_default_params();
llama_sampler_ptr sampler_chain_0(llama_sampler_chain_init(chain_params_0));
llama_sampler_chain_add(sampler_chain_0.get(), llama_sampler_init_dist(88));
|
|
@ -850,9 +780,7 @@ static void test_backend_mixed_sampling(const backend_cli_args & args) {
|
|||
{ 1, sampler_chain_1.get() }
};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

std::map<llama_seq_id, std::string> prompts = {
{0, "Hello"},
|
|
@ -887,19 +815,16 @@ static void test_backend_mixed_sampling(const backend_cli_args & args) {
|
|||
printf("backend mixed sampling test PASSED\n");
}

static void test_backend_set_sampler(const backend_cli_args & args) {
test_model_context test_ctx;

const int32_t seed = 88;
static void test_backend_set_sampler(const test_params & params) {
const int seq_id = 0;
const int32_t seed = 88;

struct llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
llama_sampler_ptr backend_sampler_chain(llama_sampler_chain_init(backend_chain_params));
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_dist(seed));
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};

if (!test_ctx.setup(args, backend_sampler_configs)) {
return;
}
test_context test_ctx(params, backend_sampler_configs);

if (!test_ctx.decode({{seq_id, "Hello"}})) {
GGML_ASSERT(false && "Failed to decode token");
|
|
@ -955,9 +880,7 @@ static void test_backend_set_sampler(const backend_cli_args & args) {
|
|||
printf("backend set sampler test PASSED\n");
}

static void test_backend_cpu_mixed_batch(const backend_cli_args & args) {
test_model_context test_ctx;

static void test_backend_cpu_mixed_batch(const test_params & params) {
// Sequence 0 uses backend sampling
struct llama_sampler_chain_params chain_params_0 = llama_sampler_chain_default_params();
llama_sampler_ptr sampler_chain_0(llama_sampler_chain_init(chain_params_0));
|
|
@ -968,12 +891,10 @@ static void test_backend_cpu_mixed_batch(const backend_cli_args & args) {
|
|||
};
|
||||
|
||||
// We need 2 sequences: seq 0 with backend sampling, seq 1 with CPU sampling
|
||||
if (!test_ctx.setup(args, backend_sampler_configs, 2)) {
|
||||
return;
|
||||
}
|
||||
test_context test_ctx(params, backend_sampler_configs, 2);
|
||||
|
||||
std::map<llama_seq_id, std::string> prompts = {
|
||||
{0, "Hello"}, // Will use backend sampling
|
||||
{0, "Hello"}, // Will use backend sampling
|
||||
{1, "Some"} // Will use CPU sampling
|
||||
};
|
||||
|
||||
|
|
@ -1047,28 +968,25 @@ static void test_backend_cpu_mixed_batch(const backend_cli_args & args) {
|
|||
printf("backend-cpu mixed batch test PASSED\n");
|
||||
}
|
||||
|
||||
static void test_backend_max_outputs(const backend_cli_args & args) {
|
||||
test_model_context test_ctx;
|
||||
|
||||
static void test_backend_max_outputs(const test_params & params) {
|
||||
const int seq_id = 0;
|
||||
const int32_t seed = 88;
|
||||
|
||||
llama_sampler_chain_params backend_chain_params = llama_sampler_chain_default_params();
|
||||
llama_sampler_ptr backend_sampler_chain(llama_sampler_chain_init(backend_chain_params));
|
||||
llama_sampler_chain_add(backend_sampler_chain.get(), llama_sampler_init_dist(seed));
|
||||
std::vector<llama_sampler_seq_config> backend_sampler_configs = {{ seq_id, backend_sampler_chain.get() }};
|
||||
|
||||
if (!test_ctx.setup(args, backend_sampler_configs)) {
|
||||
return;
|
||||
}
|
||||
test_context test_ctx(params, backend_sampler_configs);
|
||||
|
||||
llama_batch batch = llama_batch_init(512, 0, 1);
|
||||
std::string prompt = "Hello";
|
||||
|
||||
std::vector<llama_token> tokens;
|
||||
tokens.push_back(llama_vocab_bos(test_ctx.get_vocab()));
|
||||
tokens.push_back(llama_vocab_bos(test_ctx.vocab));
|
||||
|
||||
std::vector<llama_token> prompt_tokens(32);
|
||||
int n_tokens = llama_tokenize(test_ctx.get_vocab(), prompt.c_str(), prompt.length(),
|
||||
int n_tokens = llama_tokenize(test_ctx.vocab, prompt.c_str(), prompt.length(),
|
||||
prompt_tokens.data(), prompt_tokens.size(),
|
||||
false, false);
|
||||
for (int i = 0; i < n_tokens; i++) {
|
||||
|
|
@ -1090,8 +1008,8 @@ static void test_backend_max_outputs(const backend_cli_args & args) {
|
|||
}
|
||||
|
||||
struct backend_test_case {
|
||||
const char * name;
|
||||
void (*fn)(const backend_cli_args &);
|
||||
std::string name;
|
||||
void (*fn)(const test_params &);
|
||||
bool enabled_by_default;
|
||||
};
|
||||
|
||||
|
|
@ -1112,8 +1030,8 @@ static const backend_test_case BACKEND_TESTS[] = {
|
|||
{ "top_p", test_backend_top_p_sampling, true },
|
||||
};
|
||||
|
||||
static backend_cli_args parse_backend_cli(int argc, char ** argv) {
|
||||
backend_cli_args out;
|
||||
static test_args parse_cli(int argc, char ** argv) {
|
||||
test_args out;
|
||||
|
||||
for (int i = 1; i < argc; ++i) {
|
||||
const char * arg = argv[i];
|
||||
|
|
@ -1154,7 +1072,7 @@ static backend_cli_args parse_backend_cli(int argc, char ** argv) {
|
|||
out.device = arg + 9;
|
||||
continue;
|
||||
}
|
||||
if (!out.model) {
|
||||
if (out.model.empty()) {
|
||||
out.model = arg;
|
||||
continue;
|
||||
}
|
||||
|
|
@ -1163,28 +1081,28 @@ static backend_cli_args parse_backend_cli(int argc, char ** argv) {
|
|||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
if (std::strcmp(out.device, "cpu") != 0 && std::strcmp(out.device, "gpu") != 0) {
|
||||
fprintf(stderr, "Invalid device '%s'. Must be 'cpu' or 'gpu'\n", out.device);
|
||||
if (out.device != "cpu" && out.device != "gpu" && out.device != "auto") {
|
||||
fprintf(stderr, "Invalid device '%s'. Must be 'cpu', 'gpu' or 'auto'\n", out.device.c_str());
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
static std::vector<const backend_test_case *> collect_tests_to_run(const char * requested) {
|
||||
static std::vector<const backend_test_case *> collect_tests_to_run(const std::string & requested) {
|
||||
std::vector<const backend_test_case *> selected;
|
||||
|
||||
if (requested != nullptr) {
|
||||
if (!requested.empty()) {
|
||||
for (const auto & test : BACKEND_TESTS) {
|
||||
if (std::strcmp(test.name, requested) == 0) {
|
||||
if (test.name == requested) {
|
||||
selected.push_back(&test);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (selected.empty()) {
|
||||
fprintf(stderr, "Unknown test '%s'. Available tests:\n", requested);
|
||||
fprintf(stderr, "Unknown test '%s'. Available tests:\n", requested.c_str());
|
||||
for (const auto & test : BACKEND_TESTS) {
|
||||
fprintf(stderr, " %s\n", test.name);
|
||||
fprintf(stderr, " %s\n", test.name.c_str());
|
||||
}
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
|
@ -1203,34 +1121,44 @@ static std::vector<const backend_test_case *> collect_tests_to_run(const char *
|
|||
return selected;
|
||||
}
|
||||
|
||||
static void run_tests(const std::vector<const backend_test_case *> & tests, const backend_cli_args & args) {
|
||||
for (const auto * test : tests) {
|
||||
fprintf(stderr, "\n=== %s ===\n", test->name);
|
||||
test->fn(args);
|
||||
static void run_tests(const std::vector<const backend_test_case *> & tests, const test_params & args) {
|
||||
for (const auto & test : tests) {
|
||||
fprintf(stderr, "\n=== %s ===\n", test->name.c_str());
|
||||
try {
|
||||
test->fn(args);
|
||||
} catch (const std::exception & e) {
|
||||
fprintf(stderr, "Error running test '%s': %s\n", test->name.c_str(), e.what());
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
backend_cli_args args = parse_backend_cli(argc, argv);
|
||||
test_args args = parse_cli(argc, argv);
|
||||
|
||||
if (args.model == nullptr) {
|
||||
if (args.model.empty()) {
|
||||
args.model = get_model_or_exit(1, argv);
|
||||
}
|
||||
|
||||
std::ifstream file(args.model);
|
||||
if (!file.is_open()) {
|
||||
fprintf(stderr, "no model '%s' found\n", args.model);
|
||||
return EXIT_FAILURE;
|
||||
{
|
||||
std::ifstream file(args.model);
|
||||
if (!file.is_open()) {
|
||||
fprintf(stderr, "no model '%s' found\n", args.model.c_str());
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr, "using '%s'\n", args.model);
|
||||
fprintf(stderr, "using '%s'\n", args.model.c_str());
|
||||
|
||||
ggml_time_init();
|
||||
llama_backend_init();
|
||||
|
||||
test_params params = {
|
||||
/*.model =*/ load_model(args),
|
||||
};
|
||||
|
||||
const std::vector<const backend_test_case *> tests = collect_tests_to_run(args.test);
|
||||
if (!tests.empty()) {
|
||||
run_tests(tests, args);
|
||||
run_tests(tests, params);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@@ -12,6 +12,7 @@
| -------- | ----------- |
| `-h, --help, --usage` | print usage and exit |
| `--version` | show version and build info |
| `--license` | show source code license and dependencies |
| `-cl, --cache-list` | show list of models in cache |
| `--completion-bash` | print source-able bash completion script for llama.cpp |
| `--verbose-prompt` | print a verbose prompt before generation (default: false) |

@@ -56,22 +57,23 @@
| `-dt, --defrag-thold N` | KV cache defragmentation threshold (DEPRECATED)<br/>(env: LLAMA_ARG_DEFRAG_THOLD) |
| `-np, --parallel N` | number of parallel sequences to decode (default: 1)<br/>(env: LLAMA_ARG_N_PARALLEL) |
| `--mlock` | force system to keep model in RAM rather than swapping or compressing<br/>(env: LLAMA_ARG_MLOCK) |
| `--mmap, --no-mmap` | whether to memory-map model (if disabled, slower load but may reduce pageouts if not using mlock) (default: enabled)<br/>(env: LLAMA_ARG_MMAP) |
| `--mmap, --no-mmap` | whether to memory-map model. Explicitly enabling mmap disables direct-io. (if mmap disabled, slower load but may reduce pageouts if not using mlock) (default: enabled)<br/>(env: LLAMA_ARG_MMAP) |
| `-dio, --direct-io, -ndio, --no-direct-io` | use DirectIO if available. Takes precedence over --mmap (default: enabled)<br/>(env: LLAMA_ARG_DIO) |
| `--numa TYPE` | attempt optimizations that help on some NUMA systems<br/>- distribute: spread execution evenly over all nodes<br/>- isolate: only spawn threads on CPUs on the node that execution started on<br/>- numactl: use the CPU map provided by numactl<br/>if run without this previously, it is recommended to drop the system page cache before using this<br/>see https://github.com/ggml-org/llama.cpp/issues/1437<br/>(env: LLAMA_ARG_NUMA) |
| `-dev, --device <dev1,dev2,..>` | comma-separated list of devices to use for offloading (none = don't offload)<br/>use --list-devices to see a list of available devices<br/>(env: LLAMA_ARG_DEVICE) |
| `--list-devices` | print list of available devices and exit |
| `-ot, --override-tensor <tensor name pattern>=<buffer type>,...` | override tensor buffer type |
| `-ot, --override-tensor <tensor name pattern>=<buffer type>,...` | override tensor buffer type<br/>(env: LLAMA_ARG_OVERRIDE_TENSOR) |
| `-cmoe, --cpu-moe` | keep all Mixture of Experts (MoE) weights in the CPU<br/>(env: LLAMA_ARG_CPU_MOE) |
| `-ncmoe, --n-cpu-moe N` | keep the Mixture of Experts (MoE) weights of the first N layers in the CPU<br/>(env: LLAMA_ARG_N_CPU_MOE) |
| `-ngl, --gpu-layers, --n-gpu-layers N` | max. number of layers to store in VRAM (default: -1)<br/>(env: LLAMA_ARG_N_GPU_LAYERS) |
| `-ngl, --gpu-layers, --n-gpu-layers N` | max. number of layers to store in VRAM, either an exact number, 'auto', or 'all' (default: auto)<br/>(env: LLAMA_ARG_N_GPU_LAYERS) |
| `-sm, --split-mode {none,layer,row}` | how to split the model across multiple GPUs, one of:<br/>- none: use one GPU only<br/>- layer (default): split layers and KV across GPUs<br/>- row: split rows across GPUs<br/>(env: LLAMA_ARG_SPLIT_MODE) |
| `-ts, --tensor-split N0,N1,N2,...` | fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1<br/>(env: LLAMA_ARG_TENSOR_SPLIT) |
| `-mg, --main-gpu INDEX` | the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: 0)<br/>(env: LLAMA_ARG_MAIN_GPU) |
| `-fit, --fit [on\|off]` | whether to adjust unset arguments to fit in device memory ('on' or 'off', default: 'on')<br/>(env: LLAMA_ARG_FIT) |
| `-fitt, --fit-target MiB` | target margin per device for --fit option, default: 1024<br/>(env: LLAMA_ARG_FIT_TARGET) |
| `-fitt, --fit-target MiB0,MiB1,MiB2,...` | target margin per device for --fit, comma-separated list of values, single value is broadcast across all devices, default: 1024<br/>(env: LLAMA_ARG_FIT_TARGET) |
| `-fitc, --fit-ctx N` | minimum ctx size that can be set by --fit option, default: 4096<br/>(env: LLAMA_ARG_FIT_CTX) |
| `--check-tensors` | check model tensor data for invalid values (default: false) |
| `--override-kv KEY=TYPE:VALUE,...` | advanced option to override model metadata by key. to specify multiple overrides, either use comma-separated or repeat this argument.<br/>types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false,tokenizer.ggml.add_eos_token=bool:false |
| `--override-kv KEY=TYPE:VALUE,...` | advanced option to override model metadata by key. to specify multiple overrides, either use comma-separated values.<br/>types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false,tokenizer.ggml.add_eos_token=bool:false |
| `--op-offload, --no-op-offload` | whether to offload host tensor operations to device (default: true) |
| `--lora FNAME` | path to LoRA adapter (use comma-separated values to load multiple adapters) |
| `--lora-scaled FNAME:SCALE,...` | path to LoRA adapter with user defined scaling (format: FNAME:SCALE,...)<br/>note: use comma-separated values |

@@ -134,6 +136,7 @@
| `--grammar-file FNAME` | file to read grammar from |
| `-j, --json-schema SCHEMA` | JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead |
| `-jf, --json-schema-file FILE` | File containing a JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead |
| `-bs, --backend-sampling` | enable backend sampling (experimental) (default: disabled)<br/>(env: LLAMA_ARG_BACKEND_SAMPLING) |

### CLI-specific params

@@ -164,19 +167,19 @@
| `-otd, --override-tensor-draft <tensor name pattern>=<buffer type>,...` | override tensor buffer type for draft model |
| `-cmoed, --cpu-moe-draft` | keep all Mixture of Experts (MoE) weights in the CPU for the draft model<br/>(env: LLAMA_ARG_CPU_MOE_DRAFT) |
| `-ncmoed, --n-cpu-moe-draft N` | keep the Mixture of Experts (MoE) weights of the first N layers in the CPU for the draft model<br/>(env: LLAMA_ARG_N_CPU_MOE_DRAFT) |
| `--chat-template-kwargs STRING` | sets additional params for the json template parser<br/>(env: LLAMA_CHAT_TEMPLATE_KWARGS) |
| `--chat-template-kwargs STRING` | sets additional params for the json template parser, must be a valid json object string, e.g. '{"key1":"value1","key2":"value2"}'<br/>(env: LLAMA_CHAT_TEMPLATE_KWARGS) |
| `--jinja, --no-jinja` | whether to use jinja template engine for chat (default: enabled)<br/>(env: LLAMA_ARG_JINJA) |
| `--reasoning-format FORMAT` | controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:<br/>- none: leaves thoughts unparsed in `message.content`<br/>- deepseek: puts thoughts in `message.reasoning_content`<br/>- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`<br/>(default: auto)<br/>(env: LLAMA_ARG_THINK) |
| `--reasoning-budget N` | controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)<br/>(env: LLAMA_ARG_THINK_BUDGET) |
| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
| `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) |
| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
| `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) |
| `--simple-io` | use basic IO for better compatibility in subprocesses and limited consoles |
| `--draft, --draft-n, --draft-max N` | number of tokens to draft for speculative decoding (default: 16)<br/>(env: LLAMA_ARG_DRAFT_MAX) |
| `--draft-min, --draft-n-min N` | minimum number of draft tokens to use for speculative decoding (default: 0)<br/>(env: LLAMA_ARG_DRAFT_MIN) |
| `--draft-p-min P` | minimum speculative decoding probability (greedy) (default: 0.8)<br/>(env: LLAMA_ARG_DRAFT_P_MIN) |
| `-cd, --ctx-size-draft N` | size of the prompt context for the draft model (default: 0, 0 = loaded from model)<br/>(env: LLAMA_ARG_CTX_SIZE_DRAFT) |
| `-devd, --device-draft <dev1,dev2,..>` | comma-separated list of devices to use for offloading the draft model (none = don't offload)<br/>use --list-devices to see a list of available devices |
| `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | number of layers to store in VRAM for the draft model<br/>(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) |
| `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | max. number of draft model layers to store in VRAM, either an exact number, 'auto', or 'all' (default: auto)<br/>(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) |
| `-md, --model-draft FNAME` | draft model for speculative decoding (default: unused)<br/>(env: LLAMA_ARG_MODEL_DRAFT) |
| `--spec-replace TARGET DRAFT` | translate the string in TARGET into DRAFT if the draft model and main model are not compatible |
| `--gpt-oss-20b-default` | use gpt-oss-20b (note: can download weights from the internet) |
@@ -95,6 +95,7 @@ llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
| -------- | ----------- |
| `-h, --help, --usage` | print usage and exit |
| `--version` | show version and build info |
| `--license` | show source code license and dependencies |
| `-cl, --cache-list` | show list of models in cache |
| `--completion-bash` | print source-able bash completion script for llama.cpp |
| `--verbose-prompt` | print a verbose prompt before generation (default: false) |

@@ -139,22 +140,23 @@ llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
| `-dt, --defrag-thold N` | KV cache defragmentation threshold (DEPRECATED)<br/>(env: LLAMA_ARG_DEFRAG_THOLD) |
| `-np, --parallel N` | number of parallel sequences to decode (default: 1)<br/>(env: LLAMA_ARG_N_PARALLEL) |
| `--mlock` | force system to keep model in RAM rather than swapping or compressing<br/>(env: LLAMA_ARG_MLOCK) |
| `--mmap, --no-mmap` | whether to memory-map model (if disabled, slower load but may reduce pageouts if not using mlock) (default: enabled)<br/>(env: LLAMA_ARG_MMAP) |
| `--mmap, --no-mmap` | whether to memory-map model. Explicitly enabling mmap disables direct-io. (if mmap disabled, slower load but may reduce pageouts if not using mlock) (default: enabled)<br/>(env: LLAMA_ARG_MMAP) |
| `-dio, --direct-io, -ndio, --no-direct-io` | use DirectIO if available. Takes precedence over --mmap (default: enabled)<br/>(env: LLAMA_ARG_DIO) |
| `--numa TYPE` | attempt optimizations that help on some NUMA systems<br/>- distribute: spread execution evenly over all nodes<br/>- isolate: only spawn threads on CPUs on the node that execution started on<br/>- numactl: use the CPU map provided by numactl<br/>if run without this previously, it is recommended to drop the system page cache before using this<br/>see https://github.com/ggml-org/llama.cpp/issues/1437<br/>(env: LLAMA_ARG_NUMA) |
| `-dev, --device <dev1,dev2,..>` | comma-separated list of devices to use for offloading (none = don't offload)<br/>use --list-devices to see a list of available devices<br/>(env: LLAMA_ARG_DEVICE) |
| `--list-devices` | print list of available devices and exit |
| `-ot, --override-tensor <tensor name pattern>=<buffer type>,...` | override tensor buffer type |
| `-ot, --override-tensor <tensor name pattern>=<buffer type>,...` | override tensor buffer type<br/>(env: LLAMA_ARG_OVERRIDE_TENSOR) |
| `-cmoe, --cpu-moe` | keep all Mixture of Experts (MoE) weights in the CPU<br/>(env: LLAMA_ARG_CPU_MOE) |
| `-ncmoe, --n-cpu-moe N` | keep the Mixture of Experts (MoE) weights of the first N layers in the CPU<br/>(env: LLAMA_ARG_N_CPU_MOE) |
| `-ngl, --gpu-layers, --n-gpu-layers N` | max. number of layers to store in VRAM (default: -1)<br/>(env: LLAMA_ARG_N_GPU_LAYERS) |
| `-ngl, --gpu-layers, --n-gpu-layers N` | max. number of layers to store in VRAM, either an exact number, 'auto', or 'all' (default: auto)<br/>(env: LLAMA_ARG_N_GPU_LAYERS) |
| `-sm, --split-mode {none,layer,row}` | how to split the model across multiple GPUs, one of:<br/>- none: use one GPU only<br/>- layer (default): split layers and KV across GPUs<br/>- row: split rows across GPUs<br/>(env: LLAMA_ARG_SPLIT_MODE) |
| `-ts, --tensor-split N0,N1,N2,...` | fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1<br/>(env: LLAMA_ARG_TENSOR_SPLIT) |
| `-mg, --main-gpu INDEX` | the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: 0)<br/>(env: LLAMA_ARG_MAIN_GPU) |
| `-fit, --fit [on\|off]` | whether to adjust unset arguments to fit in device memory ('on' or 'off', default: 'on')<br/>(env: LLAMA_ARG_FIT) |
| `-fitt, --fit-target MiB` | target margin per device for --fit option, default: 1024<br/>(env: LLAMA_ARG_FIT_TARGET) |
| `-fitt, --fit-target MiB0,MiB1,MiB2,...` | target margin per device for --fit, comma-separated list of values, single value is broadcast across all devices, default: 1024<br/>(env: LLAMA_ARG_FIT_TARGET) |
| `-fitc, --fit-ctx N` | minimum ctx size that can be set by --fit option, default: 4096<br/>(env: LLAMA_ARG_FIT_CTX) |
| `--check-tensors` | check model tensor data for invalid values (default: false) |
| `--override-kv KEY=TYPE:VALUE,...` | advanced option to override model metadata by key. to specify multiple overrides, either use comma-separated or repeat this argument.<br/>types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false,tokenizer.ggml.add_eos_token=bool:false |
| `--override-kv KEY=TYPE:VALUE,...` | advanced option to override model metadata by key. to specify multiple overrides, either use comma-separated values.<br/>types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false,tokenizer.ggml.add_eos_token=bool:false |
| `--op-offload, --no-op-offload` | whether to offload host tensor operations to device (default: true) |
| `--lora FNAME` | path to LoRA adapter (use comma-separated values to load multiple adapters) |
| `--lora-scaled FNAME:SCALE,...` | path to LoRA adapter with user defined scaling (format: FNAME:SCALE,...)<br/>note: use comma-separated values |

@@ -217,6 +219,7 @@ llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
| `--grammar-file FNAME` | file to read grammar from |
| `-j, --json-schema SCHEMA` | JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead |
| `-jf, --json-schema-file FILE` | File containing a JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead |
| `-bs, --backend-sampling` | enable backend sampling (experimental) (default: disabled)<br/>(env: LLAMA_ARG_BACKEND_SAMPLING) |

### Completion-specific params

@@ -248,8 +251,8 @@ llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
| `--jinja, --no-jinja` | whether to use jinja template engine for chat (default: disabled)<br/>(env: LLAMA_ARG_JINJA) |
| `--reasoning-format FORMAT` | controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:<br/>- none: leaves thoughts unparsed in `message.content`<br/>- deepseek: puts thoughts in `message.reasoning_content`<br/>- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`<br/>(default: auto)<br/>(env: LLAMA_ARG_THINK) |
| `--reasoning-budget N` | controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)<br/>(env: LLAMA_ARG_THINK_BUDGET) |
| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
| `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) |
| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
| `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) |
| `--simple-io` | use basic IO for better compatibility in subprocesses and limited consoles |

<!-- HELP_END -->
@@ -33,6 +33,7 @@ For the ful list of features, please refer to [server's changelog](https://githu
| -------- | ----------- |
| `-h, --help, --usage` | print usage and exit |
| `--version` | show version and build info |
| `--license` | show source code license and dependencies |
| `-cl, --cache-list` | show list of models in cache |
| `--completion-bash` | print source-able bash completion script for llama.cpp |
| `--verbose-prompt` | print a verbose prompt before generation (default: false) |

@@ -73,22 +74,23 @@ For the ful list of features, please refer to [server's changelog](https://githu
| `-ctv, --cache-type-v TYPE` | KV cache data type for V<br/>allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1<br/>(default: f16)<br/>(env: LLAMA_ARG_CACHE_TYPE_V) |
| `-dt, --defrag-thold N` | KV cache defragmentation threshold (DEPRECATED)<br/>(env: LLAMA_ARG_DEFRAG_THOLD) |
| `--mlock` | force system to keep model in RAM rather than swapping or compressing<br/>(env: LLAMA_ARG_MLOCK) |
| `--mmap, --no-mmap` | whether to memory-map model (if disabled, slower load but may reduce pageouts if not using mlock) (default: enabled)<br/>(env: LLAMA_ARG_MMAP) |
| `--mmap, --no-mmap` | whether to memory-map model. Explicitly enabling mmap disables direct-io. (if mmap disabled, slower load but may reduce pageouts if not using mlock) (default: enabled)<br/>(env: LLAMA_ARG_MMAP) |
| `-dio, --direct-io, -ndio, --no-direct-io` | use DirectIO if available. Takes precedence over --mmap (default: enabled)<br/>(env: LLAMA_ARG_DIO) |
| `--numa TYPE` | attempt optimizations that help on some NUMA systems<br/>- distribute: spread execution evenly over all nodes<br/>- isolate: only spawn threads on CPUs on the node that execution started on<br/>- numactl: use the CPU map provided by numactl<br/>if run without this previously, it is recommended to drop the system page cache before using this<br/>see https://github.com/ggml-org/llama.cpp/issues/1437<br/>(env: LLAMA_ARG_NUMA) |
| `-dev, --device <dev1,dev2,..>` | comma-separated list of devices to use for offloading (none = don't offload)<br/>use --list-devices to see a list of available devices<br/>(env: LLAMA_ARG_DEVICE) |
| `--list-devices` | print list of available devices and exit |
| `-ot, --override-tensor <tensor name pattern>=<buffer type>,...` | override tensor buffer type |
| `-ot, --override-tensor <tensor name pattern>=<buffer type>,...` | override tensor buffer type<br/>(env: LLAMA_ARG_OVERRIDE_TENSOR) |
| `-cmoe, --cpu-moe` | keep all Mixture of Experts (MoE) weights in the CPU<br/>(env: LLAMA_ARG_CPU_MOE) |
| `-ncmoe, --n-cpu-moe N` | keep the Mixture of Experts (MoE) weights of the first N layers in the CPU<br/>(env: LLAMA_ARG_N_CPU_MOE) |
| `-ngl, --gpu-layers, --n-gpu-layers N` | max. number of layers to store in VRAM (default: -1)<br/>(env: LLAMA_ARG_N_GPU_LAYERS) |
| `-ngl, --gpu-layers, --n-gpu-layers N` | max. number of layers to store in VRAM, either an exact number, 'auto', or 'all' (default: auto)<br/>(env: LLAMA_ARG_N_GPU_LAYERS) |
| `-sm, --split-mode {none,layer,row}` | how to split the model across multiple GPUs, one of:<br/>- none: use one GPU only<br/>- layer (default): split layers and KV across GPUs<br/>- row: split rows across GPUs<br/>(env: LLAMA_ARG_SPLIT_MODE) |
| `-ts, --tensor-split N0,N1,N2,...` | fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1<br/>(env: LLAMA_ARG_TENSOR_SPLIT) |
| `-mg, --main-gpu INDEX` | the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: 0)<br/>(env: LLAMA_ARG_MAIN_GPU) |
| `-fit, --fit [on\|off]` | whether to adjust unset arguments to fit in device memory ('on' or 'off', default: 'on')<br/>(env: LLAMA_ARG_FIT) |
| `-fitt, --fit-target MiB` | target margin per device for --fit option, default: 1024<br/>(env: LLAMA_ARG_FIT_TARGET) |
| `-fitt, --fit-target MiB0,MiB1,MiB2,...` | target margin per device for --fit, comma-separated list of values, single value is broadcast across all devices, default: 1024<br/>(env: LLAMA_ARG_FIT_TARGET) |
| `-fitc, --fit-ctx N` | minimum ctx size that can be set by --fit option, default: 4096<br/>(env: LLAMA_ARG_FIT_CTX) |
| `--check-tensors` | check model tensor data for invalid values (default: false) |
| `--override-kv KEY=TYPE:VALUE,...` | advanced option to override model metadata by key. to specify multiple overrides, either use comma-separated or repeat this argument.<br/>types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false,tokenizer.ggml.add_eos_token=bool:false |
| `--override-kv KEY=TYPE:VALUE,...` | advanced option to override model metadata by key. to specify multiple overrides, either use comma-separated values.<br/>types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false,tokenizer.ggml.add_eos_token=bool:false |
| `--op-offload, --no-op-offload` | whether to offload host tensor operations to device (default: true) |
| `--lora FNAME` | path to LoRA adapter (use comma-separated values to load multiple adapters) |
| `--lora-scaled FNAME:SCALE,...` | path to LoRA adapter with user defined scaling (format: FNAME:SCALE,...)<br/>note: use comma-separated values |

@@ -151,6 +153,7 @@ For the ful list of features, please refer to [server's changelog](https://githu
| `--grammar-file FNAME` | file to read grammar from |
| `-j, --json-schema SCHEMA` | JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead |
| `-jf, --json-schema-file FILE` | File containing a JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead |
| `-bs, --backend-sampling` | enable backend sampling (experimental) (default: disabled)<br/>(env: LLAMA_ARG_BACKEND_SAMPLING) |

### Server-specific params

@@ -187,11 +190,11 @@ For the ful list of features, please refer to [server's changelog](https://githu
| `--webui, --no-webui` | whether to enable the Web UI (default: enabled)<br/>(env: LLAMA_ARG_WEBUI) |
| `--embedding, --embeddings` | restrict to only support embedding use case; use only with dedicated embedding models (default: disabled)<br/>(env: LLAMA_ARG_EMBEDDINGS) |
| `--rerank, --reranking` | enable reranking endpoint on server (default: disabled)<br/>(env: LLAMA_ARG_RERANKING) |
| `--api-key KEY` | API key to use for authentication (default: none)<br/>(env: LLAMA_API_KEY) |
| `--api-key KEY` | API key to use for authentication, multiple keys can be provided as a comma-separated list (default: none)<br/>(env: LLAMA_API_KEY) |
| `--api-key-file FNAME` | path to file containing API keys (default: none) |
| `--ssl-key-file FNAME` | path to file a PEM-encoded SSL private key<br/>(env: LLAMA_ARG_SSL_KEY_FILE) |
| `--ssl-cert-file FNAME` | path to file a PEM-encoded SSL certificate<br/>(env: LLAMA_ARG_SSL_CERT_FILE) |
| `--chat-template-kwargs STRING` | sets additional params for the json template parser<br/>(env: LLAMA_CHAT_TEMPLATE_KWARGS) |
| `--chat-template-kwargs STRING` | sets additional params for the json template parser, must be a valid json object string, e.g. '{"key1":"value1","key2":"value2"}'<br/>(env: LLAMA_CHAT_TEMPLATE_KWARGS) |
| `-to, --timeout N` | server read/write timeout in seconds (default: 600)<br/>(env: LLAMA_ARG_TIMEOUT) |
| `--threads-http N` | number of threads used to process HTTP requests (default: -1)<br/>(env: LLAMA_ARG_THREADS_HTTP) |
| `--cache-reuse N` | min chunk size to attempt reusing from the cache via KV shifting (default: 0)<br/>[(card)](https://ggml.ai/f0.png)<br/>(env: LLAMA_ARG_CACHE_REUSE) |

@@ -207,8 +210,8 @@ For the ful list of features, please refer to [server's changelog](https://githu
| `--jinja, --no-jinja` | whether to use jinja template engine for chat (default: enabled)<br/>(env: LLAMA_ARG_JINJA) |
| `--reasoning-format FORMAT` | controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:<br/>- none: leaves thoughts unparsed in `message.content`<br/>- deepseek: puts thoughts in `message.reasoning_content`<br/>- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`<br/>(default: auto)<br/>(env: LLAMA_ARG_THINK) |
| `--reasoning-budget N` | controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)<br/>(env: LLAMA_ARG_THINK_BUDGET) |
| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
| `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) |
| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
| `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, bailing-think, bailing2, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, grok-2, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, pangu-embedded, phi3, phi4, rwkv-world, seed_oss, smolvlm, solar-open, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) |
| `--prefill-assistant, --no-prefill-assistant` | whether to prefill the assistant's response if the last message is an assistant message (default: prefill enabled)<br/>when this flag is set, if the last message is an assistant message then it will be treated as a full message and not prefilled<br/><br/>(env: LLAMA_ARG_PREFILL_ASSISTANT) |
| `-sps, --slot-prompt-similarity SIMILARITY` | how much the prompt of a request must match the prompt of a slot in order to use that slot (default: 0.10, 0.0 = disabled) |
| `--lora-init-without-apply` | load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: disabled) |

@@ -220,7 +223,7 @@ For the ful list of features, please refer to [server's changelog](https://githu
| `--draft-p-min P` | minimum speculative decoding probability (greedy) (default: 0.8)<br/>(env: LLAMA_ARG_DRAFT_P_MIN) |
| `-cd, --ctx-size-draft N` | size of the prompt context for the draft model (default: 0, 0 = loaded from model)<br/>(env: LLAMA_ARG_CTX_SIZE_DRAFT) |
| `-devd, --device-draft <dev1,dev2,..>` | comma-separated list of devices to use for offloading the draft model (none = don't offload)<br/>use --list-devices to see a list of available devices |
| `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | number of layers to store in VRAM for the draft model<br/>(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) |
| `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | max. number of draft model layers to store in VRAM, either an exact number, 'auto', or 'all' (default: auto)<br/>(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) |
| `-md, --model-draft FNAME` | draft model for speculative decoding (default: unused)<br/>(env: LLAMA_ARG_MODEL_DRAFT) |
| `--spec-replace TARGET DRAFT` | translate the string in TARGET into DRAFT if the draft model and main model are not compatible |
| `-mv, --model-vocoder FNAME` | vocoder model for audio generation (default: unused) |

@@ -779,7 +782,8 @@ By default, it is read-only. To make POST request to change global properties, y
  "modalities": {
    "vision": false
  },
  "build_info": "b(build number)-(build commit hash)"
  "build_info": "b(build number)-(build commit hash)",
  "is_sleeping": false
}
```

@@ -788,6 +792,7 @@ By default, it is read-only. To make POST request to change global properties, y
- `model_path` - the path to model file (same with `-m` argument)
- `chat_template` - the model's original Jinja2 prompt template
- `modalities` - the list of supported modalities
- `is_sleeping` - sleeping status, see [Sleeping on idle](#sleeping-on-idle)

### POST `/props`: Change server global properties.

@@ -1630,9 +1635,12 @@ The server supports an automatic sleep mode that activates after a specified per

When the server enters sleep mode, the model and its associated memory (including the KV cache) are unloaded from RAM to conserve resources. Any new incoming task will automatically trigger the model to reload.

The sleeping status can be retrieved from the `GET /props` endpoint (or `/props?model=(model_name)` in router mode).

Note that the following endpoints are exempt from being considered as incoming tasks. They do not trigger model reloading and do not reset the idle timer:
- `GET /health`
- `GET /props`
- `GET /models`
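Because `GET /props` is on this exempt list, it can be polled to observe the new `is_sleeping` field without waking the model or resetting the idle timer. A minimal sketch, assuming a server on `localhost:8080` and the cpp-httplib and nlohmann::json headers that this repository already vendors (host, port and field defaults here are illustrative):

```cpp
#include "httplib.h"
#include <nlohmann/json.hpp>
#include <cstdio>

int main() {
    httplib::Client cli("http://localhost:8080");

    // GET /props is exempt from the idle timer, so this does not trigger a reload.
    auto res = cli.Get("/props");
    if (!res || res->status != 200) {
        std::fprintf(stderr, "server not reachable\n");
        return 1;
    }

    const auto props = nlohmann::json::parse(res->body);
    std::printf("build_info : %s\n", props.value("build_info", "unknown").c_str());
    std::printf("is_sleeping: %s\n", props.value("is_sleeping", false) ? "true" : "false");
    return 0;
}
```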

## More examples
@@ -160,6 +160,7 @@ task_params server_task::params_from_json_cmpl(
    defaults.n_keep         = params_base.n_keep;
    defaults.n_predict      = params_base.n_predict;
    defaults.n_cache_reuse  = params_base.n_cache_reuse;
    defaults.cache_prompt   = params_base.cache_prompt;
    defaults.antiprompt     = params_base.antiprompt;

    // enabling this will output extra debug information in the HTTP responses from the server

@@ -169,7 +170,7 @@ task_params server_task::params_from_json_cmpl(
    params.stream           = json_value(data, "stream", false);
    auto stream_opt         = json_value(data, "stream_options", json::object());
    params.include_usage    = json_value(stream_opt, "include_usage", false);
    params.cache_prompt     = json_value(data, "cache_prompt", true);
    params.cache_prompt     = json_value(data, "cache_prompt", defaults.cache_prompt);
    params.return_tokens    = json_value(data, "return_tokens", false);
    params.return_progress  = json_value(data, "return_progress", false);
    params.n_predict        = json_value(data, "n_predict", json_value(data, "max_tokens", defaults.n_predict));
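The pattern in this hunk is: a per-request JSON field wins when the client supplies it, otherwise the value falls back to the server-side default, which is why `cache_prompt` now falls back to `defaults.cache_prompt` instead of a hard-coded `true`. A rough stand-in for the `json_value()` helper, written here only to illustrate that fallback behaviour (the real helper lives in the server utilities and may differ in detail):

```cpp
#include <nlohmann/json.hpp>
#include <string>

// Illustrative sketch of the request-overrides-default pattern used above.
template <typename T>
static T json_value_sketch(const nlohmann::json & body, const std::string & key, const T & default_value) {
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key).get<T>();   // client supplied a usable value
        } catch (const nlohmann::json::exception &) {
            return default_value;            // wrong type: keep the server default
        }
    }
    return default_value;                    // field absent: keep the server default
}

// usage (names hypothetical):
//   bool cache_prompt = json_value_sketch(request_body, "cache_prompt", server_default_cache_prompt);
```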
@@ -1138,6 +1138,7 @@ int getaddrinfo_with_timeout(const char *node, const char *service,

  return ret;
#elif TARGET_OS_MAC
  if (!node) { return EAI_NONAME; }
  // macOS implementation using CFHost API for asynchronous DNS resolution
  CFStringRef hostname_ref = CFStringCreateWithCString(
      kCFAllocatorDefault, node, kCFStringEncodingUTF8);

@@ -5569,14 +5570,11 @@ bool Server::read_content(Stream &strm, Request &req, Response &res) {
      strm, req, res,
      // Regular
      [&](const char *buf, size_t n) {
        // Prevent arithmetic overflow when checking sizes.
        // Avoid computing (req.body.size() + n) directly because
        // adding two unsigned `size_t` values can wrap around and
        // produce a small result instead of indicating overflow.
        // Instead, check using subtraction: ensure `n` does not
        // exceed the remaining capacity `max_size() - size()`.
        if (req.body.size() >= req.body.max_size() ||
            n > req.body.max_size() - req.body.size()) {
        // Limit decompressed body size to payload_max_length_ to protect
        // against "zip bomb" attacks where a small compressed payload
        // decompresses to a massive size.
        if (req.body.size() + n > payload_max_length_ ||
            req.body.size() + n > req.body.max_size()) {
          return false;
        }
        req.body.append(buf, n);
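The removed comment spells out why the earlier check was written with a subtraction: for unsigned sizes, `size() + n` can wrap around, so the remaining capacity has to be compared instead. A small illustrative helper (names are made up, this is not httplib API) showing that overflow-safe style applied to a hard payload cap:

```cpp
#include <cstddef>
#include <string>

// Illustrative only: appending n bytes is allowed if the result stays within
// hard_limit, checked without ever computing body.size() + n (which could wrap).
static bool can_append(const std::string & body, std::size_t n, std::size_t hard_limit) {
    return body.size() < hard_limit && n <= hard_limit - body.size();
}
```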
@@ -8,8 +8,8 @@
#ifndef CPPHTTPLIB_HTTPLIB_H
#define CPPHTTPLIB_HTTPLIB_H

#define CPPHTTPLIB_VERSION "0.30.0"
#define CPPHTTPLIB_VERSION_NUM "0x001E00"
#define CPPHTTPLIB_VERSION "0.30.1"
#define CPPHTTPLIB_VERSION_NUM "0x001E01"

/*
 * Platform compatibility check

@@ -205,7 +205,10 @@

#pragma comment(lib, "ws2_32.lib")

#ifndef _SSIZE_T_DEFINED
using ssize_t = __int64;
#define _SSIZE_T_DEFINED
#endif
#endif // _MSC_VER

#ifndef S_ISREG

@@ -2443,16 +2446,20 @@ namespace detail {

#if defined(_WIN32)
inline std::wstring u8string_to_wstring(const char *s) {
  std::wstring ws;
  if (!s) { return std::wstring(); }

  auto len = static_cast<int>(strlen(s));
  if (!len) { return std::wstring(); }

  auto wlen = ::MultiByteToWideChar(CP_UTF8, 0, s, len, nullptr, 0);
  if (wlen > 0) {
    ws.resize(wlen);
    wlen = ::MultiByteToWideChar(
        CP_UTF8, 0, s, len,
        const_cast<LPWSTR>(reinterpret_cast<LPCWSTR>(ws.data())), wlen);
    if (wlen != static_cast<int>(ws.size())) { ws.clear(); }
  }
  if (!wlen) { return std::wstring(); }

  std::wstring ws;
  ws.resize(wlen);
  wlen = ::MultiByteToWideChar(
      CP_UTF8, 0, s, len,
      const_cast<LPWSTR>(reinterpret_cast<LPCWSTR>(ws.data())), wlen);
  if (wlen != static_cast<int>(ws.size())) { ws.clear(); }
  return ws;
}
#endif
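The refactor above boils down to the usual two-pass `MultiByteToWideChar` idiom: one call to size the buffer, a second call to convert, and an empty string on any failure. A standalone sketch of that idiom (Windows-only, illustrative rather than the httplib implementation):

```cpp
#ifdef _WIN32
#include <windows.h>
#include <cstring>
#include <string>

// Two-pass UTF-8 -> UTF-16 conversion; returns an empty wstring on failure.
static std::wstring utf8_to_wide(const char * s) {
    if (!s || !*s) { return std::wstring(); }
    const int len  = static_cast<int>(std::strlen(s));
    // First pass: query the required number of wide characters.
    const int wlen = ::MultiByteToWideChar(CP_UTF8, 0, s, len, nullptr, 0);
    if (wlen <= 0) { return std::wstring(); }
    // Second pass: convert into a pre-sized buffer.
    std::wstring ws(static_cast<size_t>(wlen), L'\0');
    if (::MultiByteToWideChar(CP_UTF8, 0, s, len, &ws[0], wlen) != wlen) {
        ws.clear();
    }
    return ws;
}
#endif
```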