Merge branch 'ggml-org:master' into sf/deepseek-ocr

commit 4a4f82968c
@ -0,0 +1,295 @@
# Server WebUI build and tests
name: Server WebUI

on:
  workflow_dispatch: # allows manual triggering
    inputs:
      sha:
        description: 'Commit SHA1 to build'
        required: false
        type: string
      slow_tests:
        description: 'Run slow tests'
        required: true
        type: boolean
  push:
    branches:
      - master
    paths: ['.github/workflows/server-webui.yml', 'tools/server/webui/**.*', 'tools/server/tests/**.*', 'tools/server/public/**']
  pull_request:
    types: [opened, synchronize, reopened]
    paths: ['.github/workflows/server-webui.yml', 'tools/server/webui/**.*', 'tools/server/tests/**.*', 'tools/server/public/**']

env:
  LLAMA_LOG_COLORS: 1
  LLAMA_LOG_PREFIX: 1
  LLAMA_LOG_TIMESTAMPS: 1
  LLAMA_LOG_VERBOSITY: 10

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  webui-setup:
    name: WebUI Setup
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"
          cache: "npm"
          cache-dependency-path: "tools/server/webui/package-lock.json"

      - name: Cache node_modules
        uses: actions/cache@v4
        id: cache-node-modules
        with:
          path: tools/server/webui/node_modules
          key: ${{ runner.os }}-node-modules-${{ hashFiles('tools/server/webui/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-modules-

      - name: Install dependencies
        if: steps.cache-node-modules.outputs.cache-hit != 'true'
        run: npm ci
        working-directory: tools/server/webui

  webui-check:
    needs: webui-setup
    name: WebUI Check
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"

      - name: Restore node_modules cache
        uses: actions/cache@v4
        with:
          path: tools/server/webui/node_modules
          key: ${{ runner.os }}-node-modules-${{ hashFiles('tools/server/webui/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-modules-

      - name: Run type checking
        run: npm run check
        working-directory: tools/server/webui

      - name: Run linting
        run: npm run lint
        working-directory: tools/server/webui

  webui-build:
    needs: webui-check
    name: WebUI Build
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"

      - name: Restore node_modules cache
        uses: actions/cache@v4
        with:
          path: tools/server/webui/node_modules
          key: ${{ runner.os }}-node-modules-${{ hashFiles('tools/server/webui/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-modules-

      - name: Build application
        run: npm run build
        working-directory: tools/server/webui

  webui-tests:
    needs: webui-build
    name: Run WebUI tests
    permissions:
      contents: read

    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"

      - name: Restore node_modules cache
        uses: actions/cache@v4
        with:
          path: tools/server/webui/node_modules
          key: ${{ runner.os }}-node-modules-${{ hashFiles('tools/server/webui/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-modules-

      - name: Install Playwright browsers
        run: npx playwright install --with-deps
        working-directory: tools/server/webui

      - name: Build Storybook
        run: npm run build-storybook
        working-directory: tools/server/webui

      - name: Run Client tests
        run: npm run test:client
        working-directory: tools/server/webui

      - name: Run Server tests
        run: npm run test:server
        working-directory: tools/server/webui

      - name: Run UI tests
        run: npm run test:ui -- --testTimeout=60000
        working-directory: tools/server/webui

      - name: Run E2E tests
        run: npm run test:e2e
        working-directory: tools/server/webui

  server-build:
    needs: [webui-tests]
    runs-on: ubuntu-latest

    strategy:
      matrix:
        sanitizer: [ADDRESS, UNDEFINED] # THREAD is broken
        build_type: [RelWithDebInfo]
        include:
          - build_type: Release
            sanitizer: ""
      fail-fast: false # While -DLLAMA_SANITIZE_THREAD=ON is broken

    steps:
      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get -y install \
            build-essential \
            xxd \
            git \
            cmake \
            curl \
            wget \
            language-pack-en \
            libssl-dev

      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}

      - name: Python setup
        id: setup_python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Tests dependencies
        id: test_dependencies
        run: |
          pip install -r tools/server/tests/requirements.txt

      - name: Setup Node.js for WebUI
        uses: actions/setup-node@v4
        with:
          node-version: "22"
          cache: "npm"
          cache-dependency-path: "tools/server/webui/package-lock.json"

      - name: Install WebUI dependencies
        run: npm ci
        working-directory: tools/server/webui

      - name: Build WebUI
        run: npm run build
        working-directory: tools/server/webui

      - name: Build (no OpenMP)
        id: cmake_build_no_openmp
        if: ${{ matrix.sanitizer == 'THREAD' }}
        run: |
          cmake -B build \
            -DGGML_NATIVE=OFF \
            -DLLAMA_CURL=OFF \
            -DLLAMA_OPENSSL=ON \
            -DLLAMA_BUILD_SERVER=ON \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \
            -DGGML_OPENMP=OFF ;
          cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server

      - name: Build (sanitizers)
        id: cmake_build_sanitizers
        if: ${{ matrix.sanitizer != '' && matrix.sanitizer != 'THREAD' }}
        run: |
          cmake -B build \
            -DGGML_NATIVE=OFF \
            -DLLAMA_CURL=OFF \
            -DLLAMA_OPENSSL=ON \
            -DLLAMA_BUILD_SERVER=ON \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ;
          cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server

      - name: Build (sanitizers)
        id: cmake_build
        if: ${{ matrix.sanitizer == '' }}
        run: |
          cmake -B build \
            -DGGML_NATIVE=OFF \
            -DLLAMA_CURL=OFF \
            -DLLAMA_OPENSSL=ON \
            -DLLAMA_BUILD_SERVER=ON \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} ;
          cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server

      - name: Tests
        id: server_integration_tests
        if: ${{ matrix.sanitizer == '' }}
        env:
          GITHUB_ACTIONS: "true"
        run: |
          cd tools/server/tests
          ./tests.sh

      - name: Tests (sanitizers)
        id: server_integration_tests_sanitizers
        if: ${{ matrix.sanitizer != '' }}
        run: |
          cd tools/server/tests
          LLAMA_SANITIZE=1 ./tests.sh

      - name: Slow tests
        id: server_integration_tests_slow
        if: ${{ (github.event.schedule || github.event.inputs.slow_tests == 'true') && matrix.build_type == 'Release' }}
        run: |
          cd tools/server/tests
          SLOW_TESTS=1 ./tests.sh
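For convenience, a minimal sketch of kicking off this workflow by hand with the GitHub CLI; an authenticated `gh` inside a clone of the repository is assumed, and the input names come from the `workflow_dispatch` block above:

```sh
# Dispatch the "Server WebUI" workflow on master and enable the slow tests input.
gh workflow run server-webui.yml --ref master -f slow_tests=true

# Optionally point it at a specific commit via the optional `sha` input.
gh workflow run server-webui.yml --ref master -f sha=<commit-sha> -f slow_tests=false
```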
@ -76,270 +76,6 @@ jobs:
        run: |
          pip install -r tools/server/tests/requirements.txt
  server-windows:
    runs-on: windows-2022
@ -87,7 +87,8 @@
/tests/               @ggerganov
/tests/test-chat-.*   @pwilkin
/tools/batched-bench/ @ggerganov
/tools/main/          @ggerganov
/tools/cli/           @ngxson
/tools/completion/    @ggerganov
/tools/mtmd/          @ngxson
/tools/perplexity/    @ggerganov
/tools/quantize/      @ggerganov
@ -313,7 +313,7 @@ The Hugging Face platform provides a variety of online tools for converting, qua

To learn more about model quantization, [read this documentation](tools/quantize/README.md)

## [`llama-cli`](tools/main)
## [`llama-cli`](tools/cli)

#### A CLI tool for accessing and experimenting with most of `llama.cpp`'s functionality.

@ -525,7 +525,8 @@ To learn more about model quantization, [read this documentation](tools/quantize

## Other documentation

- [main (cli)](tools/main/README.md)
- [cli](tools/cli/README.md)
- [completion](tools/completion/README.md)
- [server](tools/server/README.md)
- [GBNF grammars](grammars/README.md)
@ -136,19 +136,11 @@ class ModelBase:
        self.remote_hf_model_id = remote_hf_model_id
        self.sentence_transformers_dense_modules = sentence_transformers_dense_modules
        self.hparams = ModelBase.load_hparams(self.dir_model, self.is_mistral_format) if hparams is None else hparams
        self.rope_parameters = self.hparams.get("rope_parameters", self.hparams.get("rope_scaling")) or {}
        self.model_tensors = self.index_tensors(remote_hf_model_id=remote_hf_model_id)
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Ensure "rope_theta" and "rope_type" is mirrored in rope_parameters
        if "full_attention" not in self.rope_parameters and "sliding_attention" not in self.rope_parameters:
            if "rope_theta" not in self.rope_parameters and (rope_theta := self.find_hparam(["rope_theta", "global_rope_theta", "rotary_emb_base"], optional=True)) is not None:
                self.rope_parameters["rope_theta"] = rope_theta
            if "rope_type" not in self.rope_parameters and (rope_type := self.rope_parameters.get("type")) is not None:
                self.rope_parameters["rope_type"] = rope_type

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.

@ -768,6 +760,15 @@ class TextModel(ModelBase):
        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

        self.rope_parameters = self.hparams.get("rope_parameters", self.hparams.get("rope_scaling")) or {}

        # Ensure "rope_theta" and "rope_type" is mirrored in rope_parameters
        if "full_attention" not in self.rope_parameters and "sliding_attention" not in self.rope_parameters:
            if "rope_theta" not in self.rope_parameters and (rope_theta := self.find_hparam(["rope_theta", "global_rope_theta", "rotary_emb_base"], optional=True)) is not None:
                self.rope_parameters["rope_theta"] = rope_theta
            if "rope_type" not in self.rope_parameters and (rope_type := self.rope_parameters.get("type")) is not None:
                self.rope_parameters["rope_type"] = rope_type

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors

@ -1206,6 +1207,9 @@ class TextModel(ModelBase):
        if chkhsh == "f4f37b6c8eb9ea29b3eac6bb8c8487c5ab7885f8d8022e67edc1c68ce8403e95":
            # ref: https://huggingface.co/MiniMaxAI/MiniMax-M2
            res = "minimax-m2"
        if chkhsh == "4a2e2abae11ca2b86d570fc5b44be4d5eb5e72cc8f22dd136a94b37da83ab665":
            # ref: https://huggingface.co/KORMo-Team/KORMo-tokenizer
            res = "kormo"

        if res is None:
            logger.warning("\n")

@ -3401,7 +3405,7 @@ class QwenModel(TextModel):
        self._set_vocab_qwen()


@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM", "Qwen2AudioForConditionalGeneration")
@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM", "Qwen2AudioForConditionalGeneration", "KORMoForCausalLM")
class Qwen2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2

@ -8576,8 +8580,18 @@ class GraniteHybridModel(Mamba2Model, GraniteMoeModel):
class NemotronHModel(GraniteHybridModel):
    """Hybrid mamba2/attention model from NVIDIA"""
    model_arch = gguf.MODEL_ARCH.NEMOTRON_H
    is_moe: bool = False

    def __init__(self, *args, **kwargs):
        # We have to determine the correct model architecture (MoE vs non-MoE) before
        # calling the parent __init__. This is because the parent constructor
        # uses self.model_arch to build the tensor name map, and all MoE-specific
        # mappings would be missed if it were called with the default non-MoE arch.
        hparams = ModelBase.load_hparams(args[0], self.is_mistral_format)
        if "num_experts_per_tok" in hparams:
            self.model_arch = gguf.MODEL_ARCH.NEMOTRON_H_MOE
            self.is_moe = True

        super().__init__(*args, **kwargs)

        # Save the top-level head_dim for later

@ -8589,9 +8603,11 @@ class NemotronHModel(GraniteHybridModel):
        # Update the ssm / attn / mlp layers
        # M: Mamba2, *: Attention, -: MLP
        # MoE:
        # M: Mamba2, *: Attention, E: Expert
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
        self._ssm_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "M"]
        self._mlp_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "-"]
        self._mlp_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == ("E" if self.is_moe else "-")]

    def get_attn_layers(self):
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]

@ -8607,10 +8623,28 @@ class NemotronHModel(GraniteHybridModel):
        # Set feed_forward_length
        # NOTE: This will trigger an override warning. This is preferable to
        #       duplicating all the parent logic
        n_ff = self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"])
        self.gguf_writer.add_feed_forward_length([
            n_ff if i in self._mlp_layers else 0 for i in range(self.block_count)
        ])
        if not self.is_moe:
            n_ff = self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"])
            self.gguf_writer.add_feed_forward_length([
                n_ff if i in self._mlp_layers else 0 for i in range(self.block_count)
            ])
        else:
            moe_intermediate_size = self.hparams["moe_intermediate_size"]
            self.gguf_writer.add_feed_forward_length([
                moe_intermediate_size if i in self._mlp_layers else 0 for i in range(self.block_count)
            ])
            self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
            self.gguf_writer.add_expert_feed_forward_length(self.hparams["moe_intermediate_size"])
            self.gguf_writer.add_expert_shared_feed_forward_length(self.hparams["moe_shared_expert_intermediate_size"])
            self.gguf_writer.add_expert_count(self.hparams["n_routed_experts"])
            self.gguf_writer.add_expert_shared_count(self.hparams["n_shared_experts"])
            self.gguf_writer.add_expert_weights_norm(self.hparams["norm_topk_prob"])
            self.gguf_writer.add_expert_weights_scale(self.hparams["routed_scaling_factor"])
            self.gguf_writer.add_expert_group_count(self.hparams["n_group"])

        # number of experts used per token (top-k)
        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)

    def set_vocab(self):
        super().set_vocab()

@ -8618,7 +8652,81 @@ class NemotronHModel(GraniteHybridModel):
        # The tokenizer _does_ add a BOS token (via post_processor type
        # TemplateProcessing) but does not set add_bos_token to true in the
        # config, so we need to explicitly override it here.
        self.gguf_writer.add_add_bos_token(True)
        if not self.is_moe:
            self.gguf_writer.add_add_bos_token(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if self.is_moe and bid is not None:
            if name.endswith("mixer.gate.e_score_correction_bias"):
                new_name = name.replace("e_score_correction_bias", "e_score_correction.bias")
                mapped_name = self.map_tensor_name(new_name)
                return [(mapped_name, data_torch)]

            if name.endswith("mixer.dt_bias"):
                new_name = name.replace("dt_bias", "dt.bias")
                mapped_name = self.map_tensor_name(new_name)
                return [(mapped_name, data_torch)]

            if name.endswith("mixer.conv1d.weight"):
                squeezed_data = data_torch.squeeze()
                mapped_name = self.map_tensor_name(name)
                return [(mapped_name, squeezed_data)]

            if name.endswith("mixer.A_log"):
                transformed_data = -torch.exp(data_torch)
                reshaped_data = transformed_data.squeeze().reshape(-1, 1)
                mapped_name = self.map_tensor_name(name)
                return [(mapped_name, reshaped_data)]

            if name.endswith("mixer.D"):
                reshaped_data = data_torch.squeeze().reshape(-1, 1)
                mapped_name = self.map_tensor_name(name)
                return [(mapped_name, reshaped_data)]

            if name.endswith("mixer.norm.weight"):
                reshaped_data = data_torch.reshape(8, 512)
                mapped_name = self.map_tensor_name(name)
                return [(mapped_name, reshaped_data)]

            if name.find("mixer.experts") != -1:
                n_experts = self.hparams["n_routed_experts"]
                assert bid is not None

                if self._experts is None:
                    self._experts = [{} for _ in range(self.block_count)]

                self._experts[bid][name] = data_torch

                if len(self._experts[bid]) >= n_experts * 2:
                    # merge the experts into a single tensor
                    tensors: list[tuple[str, Tensor]] = []
                    for w_name in ["down_proj", "up_proj"]:
                        datas: list[Tensor] = []

                        for xid in range(n_experts):
                            ename = f"backbone.layers.{bid}.mixer.experts.{xid}.{w_name}.weight"
                            datas.append(self._experts[bid][ename])
                            del self._experts[bid][ename]

                        data_torch = torch.stack(datas, dim=0)
                        merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                        new_name = self.map_tensor_name(merged_name)
                        tensors.append((new_name, data_torch))

                    return tensors
                else:
                    return []

        return super().modify_tensors(data_torch, name, bid)

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("BailingMoeForCausalLM")
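As a usage reminder (not part of the diff itself), a minimal sketch of converting a checkpoint with this script; the local model path and output file name are placeholders:

```sh
# Convert a local Hugging Face checkpoint (e.g. a NemotronH / NemotronH-MoE model) to GGUF.
python3 convert_hf_to_gguf.py /path/to/hf-model --outfile nemotron-h.gguf --outtype f16
```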
@ -143,6 +143,7 @@ models = [
    {"name": "bailingmoe2",     "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-mini-base-2.0", },
    {"name": "granite-docling", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ibm-granite/granite-docling-258M", },
    {"name": "minimax-m2",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/MiniMaxAI/MiniMax-M2", },
    {"name": "kormo",           "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/KORMo-Team/KORMo-tokenizer", },
]

# some models are known to be broken upstream, so we will skip them as exceptions
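A minimal sketch of how this list is typically consumed after adding an entry such as `kormo`; the exact invocation (a Hugging Face token passed as the first argument) is an assumption based on the script's usual usage:

```sh
# Run from the repository root; downloads each listed tokenizer and regenerates the
# chkhsh values that convert_hf_to_gguf.py checks (the HF token is assumed here).
python3 convert_hf_to_gguf_update.py <huggingface_token>
```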
@ -9,7 +9,8 @@ Adding a model requires few steps:
After following these steps, you can open a PR.

Also, it is important to check that the examples and main ggml backends (CUDA, METAL, CPU) are working with the new architecture, especially:
- [main](/tools/main/)
- [cli](/tools/cli/)
- [completion](/tools/completion/)
- [imatrix](/tools/imatrix/)
- [quantize](/tools/quantize/)
- [server](/tools/server/)
@ -1976,9 +1976,6 @@ static bool ggml_hexagon_supported_mul_mat(const struct ggml_hexagon_session * s
            break;

        case GGML_TYPE_F16:
            if (!opt_experimental) {
                return false;
            }
            break;

        default:

@ -903,7 +903,7 @@ static void vec_dot_f16_f32(const int n, float * restrict s, const void * restri
    const float * restrict vy = (const float * restrict) y;

    for (uint32_t i = 0; i < n; i++) {
        rsum += vx[i] * (__fp16) vy[i];
        rsum += (float)vx[i] * vy[i];
    }
    *s = rsum;
    return;

@ -917,7 +917,7 @@ static void vec_dot_f16_f32(const int n, float * restrict s, const void * restri
    // for some reason we need volatile here so that the compiler doesn't try anything funky
    volatile HVX_Vector rsum = Q6_V_vsplat_R(0);

    float r_sum_scalar = 0.0f;
    uint32_t i = 0;

    for (i = 0; i < nv0; i++) {

@ -926,31 +926,42 @@ static void vec_dot_f16_f32(const int n, float * restrict s, const void * restri
        HVX_Vector x = vx[i];
        HVX_VectorPair xp = Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(x), Q6_Vh_vsplat_R(0x3C00)); // mul by 1.0

        HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp));
        HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp));
        // NOTE: need volatile here to prevent compiler optimization
        //       Seem compiler cannot guarantee read-after-write??
        volatile HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp));
        volatile HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp));

        HVX_Vector sum = Q6_Vqf32_vadd_Vqf32Vqf32(hi, lo);
        rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, sum);
    }

    if (nv1) {
        HVX_VectorPair yp = vy[i];
        // HVX_VectorPair yp = vy[i];

        HVX_Vector x = vx[i];
        HVX_VectorPair xp = Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(x), Q6_Vh_vsplat_R(0x3C00)); // mul by 1.0
        // HVX_Vector x = vx[i];
        // HVX_VectorPair xp = Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(x), Q6_Vh_vsplat_R(0x3C00)); // mul by 1.0

        if (nv1 >= 32) {
            HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp));
            rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, hi);
            nv1 -= 32;
        }
        // if (nv1 >= 32) {
        //     volatile HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp));
        //     rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, hi);
        //     nv1 -= 32;
        // }

        // rsum = hvx_vec_qf32_reduce_sum(rsum);

        // if (nv1) {
        //     volatile HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp));
        //     HVX_Vector sum = hvx_vec_qf32_reduce_sum_n(lo, nv1);
        //     rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, sum);
        // }

        // process the remainder using scalar loop
        rsum = hvx_vec_qf32_reduce_sum(rsum);
        const __fp16 * restrict sx = (const __fp16 * restrict) x;
        const float  * restrict sy = (const float * restrict) y;

        if (nv1) {
            HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp));
            HVX_Vector sum = hvx_vec_qf32_reduce_sum_n(lo, nv1);
            rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, sum);
        for (uint32_t i = nv0 * 64; i < n; i++) {
            r_sum_scalar += (float) sx[i] * sy[i];
        }

        // hvx_vec_dump_fp16("X", x);

@ -961,7 +972,7 @@ static void vec_dot_f16_f32(const int n, float * restrict s, const void * restri
        rsum = hvx_vec_qf32_reduce_sum(rsum);
    }

    *s = hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(rsum));
    *s = hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(rsum)) + r_sum_scalar;

# ifdef HTP_DEBUG
    {

@ -1498,9 +1509,6 @@ static void matmul_f16_f32(struct htp_tensor * restrict src0,
    uint64_t t1, t2;
    t1 = HAP_perf_get_qtimer_count();

    const size_t src0_row_size = sizeof(__fp16) * ne00;
    const size_t src1_row_size = sizeof(float) * ne10;

    assert(ne12 % ne02 == 0);
    assert(ne13 % ne03 == 0);

@ -1510,8 +1518,6 @@ static void matmul_f16_f32(struct htp_tensor * restrict src0,
    // This is the size of the rest of the dimensions of the result
    const uint32_t nr1 = ne1 * ne2 * ne3;

    uint32_t chunk_size = 64;

    // distribute the thread work across the inner or outer loop based on which one is larger
    uint32_t nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
    uint32_t nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows

@ -1544,11 +1550,11 @@ static void matmul_f16_f32(struct htp_tensor * restrict src0,
    const uint32_t blck_0 = 64;
    const uint32_t blck_1 = 64;

    float tmp[32];
    __attribute__((aligned(128))) float tmp[64];

    for (uint32_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
        for (uint32_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
            for (uint32_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1++) {
            for (uint32_t ir1 = iir1; ir1 < MIN(iir1 + blck_1, ir1_end); ir1++) {
                const uint32_t i13 = (ir1 / (ne12 * ne1));
                const uint32_t i12 = (ir1 - i13 * ne12 * ne1) / ne1;
                const uint32_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1);

@ -1561,13 +1567,16 @@ static void matmul_f16_f32(struct htp_tensor * restrict src0,
                const uint32_t i2 = i12;
                const uint32_t i3 = i13;

                const uint8_t * restrict src0_row = (const uint8_t *) src0->data + (0 + i02 * nb02 + i03 * nb03);
                const uint8_t * restrict src0_base = (const uint8_t *) src0->data + (0 + i02 * nb02 + i03 * nb03);
                const uint8_t * restrict src1_col =
                    (const uint8_t *) src1->data + (i11 + i12 * ne11 + i13 * ne12 * ne11) * src1_row_size;
                    (const uint8_t *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13);
                float * dst_col = (float *) ((uint8_t * restrict) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3));

                for (uint32_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0++) {
                    vec_dot_f16_f32(ne00, &tmp[ir0 - iir0], src0_row + ir0 * src0_row_size, src1_col);
                const uint32_t ir0_block_end = MIN(iir0 + blck_0, ir0_end);
                for (uint32_t ir0 = iir0; ir0 < ir0_block_end; ir0++) {
                    // Use nb01 stride for non-contiguous src0 support
                    const uint8_t * restrict src0_row = src0_base + ir0 * nb01;
                    vec_dot_f16_f32(ne00, &tmp[ir0 - iir0], src0_row, src1_col);
                }

                hvx_copy_fp32_ua((uint8_t *) &dst_col[iir0], (uint8_t *) tmp, MIN(iir0 + blck_0, ir0_end) - iir0);
@ -769,9 +769,16 @@ ggml_metal_device_t ggml_metal_device_init(void) {
#endif

    dev->props.use_shared_buffers = dev->props.has_unified_memory;
#if TARGET_OS_OSX
    // In case of eGPU, shared memory may be preferable.
    dev->props.use_shared_buffers |= [dev->mtl_device location] == MTLDeviceLocationExternal;
#endif
    if (getenv("GGML_METAL_SHARED_BUFFERS_DISABLE") != NULL) {
        dev->props.use_shared_buffers = false;
    }
    if (getenv("GGML_METAL_SHARED_BUFFERS_ENABLE") != NULL) {
        dev->props.use_shared_buffers = true;
    }

    dev->props.supports_gpu_family_apple7 = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7];
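The two environment variables introduced above override the shared-buffer heuristic at run time; a small sketch, with the binary and model names as placeholders:

```sh
# Force private (non-shared) Metal buffers, e.g. to compare behaviour on unified-memory Macs.
GGML_METAL_SHARED_BUFFERS_DISABLE=1 ./llama-cli -m model.gguf -p "Hello"

# Force shared buffers, e.g. when driving an external GPU where they may be preferable.
GGML_METAL_SHARED_BUFFERS_ENABLE=1 ./llama-cli -m model.gguf -p "Hello"
```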
@ -420,6 +420,7 @@ class MODEL_ARCH(IntEnum):
    JAIS           = auto()
    NEMOTRON       = auto()
    NEMOTRON_H     = auto()
    NEMOTRON_H_MOE = auto()
    EXAONE         = auto()
    EXAONE4        = auto()
    GRANITE        = auto()

@ -810,6 +811,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
    MODEL_ARCH.JAIS:           "jais",
    MODEL_ARCH.NEMOTRON:       "nemotron",
    MODEL_ARCH.NEMOTRON_H:     "nemotron_h",
    MODEL_ARCH.NEMOTRON_H_MOE: "nemotron_h_moe",
    MODEL_ARCH.EXAONE:         "exaone",
    MODEL_ARCH.EXAONE4:        "exaone4",
    MODEL_ARCH.GRANITE:        "granite",

@ -2618,6 +2620,33 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
        MODEL_TENSOR.FFN_DOWN,
        MODEL_TENSOR.FFN_UP,
    ],
    MODEL_ARCH.NEMOTRON_H_MOE: [
        MODEL_TENSOR.TOKEN_EMBD,
        MODEL_TENSOR.OUTPUT_NORM,
        MODEL_TENSOR.OUTPUT,
        MODEL_TENSOR.ATTN_NORM,
        MODEL_TENSOR.SSM_IN,
        MODEL_TENSOR.SSM_CONV1D,
        MODEL_TENSOR.SSM_DT,
        MODEL_TENSOR.SSM_A,
        MODEL_TENSOR.SSM_D,
        MODEL_TENSOR.SSM_NORM,
        MODEL_TENSOR.SSM_OUT,
        MODEL_TENSOR.ATTN_Q,
        MODEL_TENSOR.ATTN_K,
        MODEL_TENSOR.ATTN_V,
        MODEL_TENSOR.ATTN_OUT,
        MODEL_TENSOR.FFN_DOWN,
        MODEL_TENSOR.FFN_UP,
        # experts
        MODEL_TENSOR.FFN_GATE_INP,
        MODEL_TENSOR.FFN_UP_EXP,
        MODEL_TENSOR.FFN_DOWN_EXP,
        # shared expert
        MODEL_TENSOR.FFN_DOWN_SHEXP,
        MODEL_TENSOR.FFN_UP_SHEXP,
        MODEL_TENSOR.FFN_EXP_PROBS_B,
    ],
    MODEL_ARCH.EXAONE: [
        MODEL_TENSOR.TOKEN_EMBD,
        MODEL_TENSOR.OUTPUT_NORM,
@ -154,7 +154,8 @@ class TensorNameMap:
            "model.layers.{bid}.operator_norm",           # lfm2
            "model.transformer.blocks.{bid}.attn_norm",   # llada
            "layers.{bid}.input_layernorm",               # qwen3-embedding
            "model.layers.{bid}.attention_layernorm"      # apertus
            "model.layers.{bid}.attention_layernorm",     # apertus
            "model.layers.{bid}.pre_attention_layernorm", # kormo
        ),

        # Attention norm 2

@ -342,6 +343,7 @@ class TensorNameMap:
            "model.transformer.blocks.{bid}.ff_norm",     # llada
            "layers.{bid}.post_attention_layernorm",      # qwen3-embedding
            "model.layers.{bid}.feedforward_layernorm",   # apertus
            "model.layers.{bid}.pre_mlp_layernorm",       # kormo
        ),

        # Pre feed-forward norm

@ -377,6 +379,7 @@ class TensorNameMap:
            "model.layers.{bid}.feed_forward.gate",       # lfm2moe
            "model.layers.{bid}.mlp.router.gate",         # afmoe
            "layers.{bid}.gate",                          # mistral-large
            "backbone.layers.{bid}.mixer.gate",           # nemotron-h-moe
        ),

        MODEL_TENSOR.FFN_GATE_INP_SHEXP: (

@ -390,6 +393,7 @@ class TensorNameMap:
            "model.layers.{bid}.mlp.expert_bias",                     # afmoe
            "model.layers.{bid}.feed_forward.expert_bias",            # lfm2moe
            "model.layers.{bid}.block_sparse_moe.e_score_correction", # minimax-m2
            "backbone.layers.{bid}.mixer.gate.e_score_correction"     # nemotron-h-moe
        ),

        # Feed-forward up

@ -438,7 +442,7 @@ class TensorNameMap:
            "layers.{bid}.feed_forward.experts.w3",            # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_v",    # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.v1",     # dbrx
            "model.layers.{bid}.mlp.experts.up_proj",          # qwen2moe olmoe (merged) ernie4.5-moe
            "model.layers.{bid}.mlp.experts.up_proj",          # qwen2moe olmoe (merged) ernie4.5-moe, nemotron-h-moe (merged)
            "model.layers.{bid}.block_sparse_moe.experts.w3",  # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.up_proj", # llama4
            "encoder.layers.{bid}.mlp.experts.mlp.w1",         # nomic-bert-moe

@ -452,6 +456,7 @@ class TensorNameMap:
            "model.layers.{bid}.feed_forward.down_proj",
            "model.layers.{bid}.mlp.shared_mlp.up_proj",           # hunyuan
            "layers.{bid}.shared_experts.w3",                      # mistral-large
            "backbone.layers.{bid}.mixer.shared_experts.up_proj",  # nemotron-h-moe
        ),

        MODEL_TENSOR.FFN_UP_CHEXP: (

@ -546,7 +551,7 @@ class TensorNameMap:
            "layers.{bid}.feed_forward.experts.w2",              # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_1",      # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w2",       # dbrx
            "model.layers.{bid}.mlp.experts.down_proj",          # qwen2moe olmoe (merged) ernie4.5-moe
            "model.layers.{bid}.mlp.experts.down_proj",          # qwen2moe olmoe (merged) ernie4.5-moe nemotron-h-moe (merged)
            "model.layers.{bid}.block_sparse_moe.output_linear", # granitemoe
            "model.layers.{bid}.block_sparse_moe.experts.w2",    # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.down_proj", # llama4

@ -561,6 +566,7 @@ class TensorNameMap:
            "model.layers.{bid}.shared_mlp.output_linear",          # granitemoe
            "model.layers.{bid}.mlp.shared_mlp.down_proj",          # hunyuan
            "layers.{bid}.shared_experts.w2",                       # mistral-large
            "backbone.layers.{bid}.mixer.shared_experts.down_proj", # nemotron-h-moe
        ),

        MODEL_TENSOR.FFN_DOWN_CHEXP: (

@ -704,6 +710,7 @@ class TensorNameMap:
            "model.layers.{bid}.mamba.dt_proj",        # jamba falcon-h1 granite-hybrid
            "model.layers.layers.{bid}.mixer.dt_proj", # plamo2
            "model.layers.{bid}.linear_attn.dt_proj",  # qwen3next
            "backbone.layers.{bid}.mixer.dt",          # nemotron-h-moe
        ),

        MODEL_TENSOR.SSM_DT_NORM: (
@ -1,6 +1,6 @@
# GBNF Guide

GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.org/wiki/Formal_grammar) to constrain model outputs in `llama.cpp`. For example, you can use it to force the model to generate valid JSON, or speak only in emojis. GBNF grammars are supported in various ways in `tools/main` and `tools/server`.
GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.org/wiki/Formal_grammar) to constrain model outputs in `llama.cpp`. For example, you can use it to force the model to generate valid JSON, or speak only in emojis. GBNF grammars are supported in various ways in `tools/cli`, `tools/completion` and `tools/server`.

## Background

@ -135,7 +135,7 @@ While semantically correct, the syntax `x? x? x?.... x?` (with N repetitions) ma
You can use GBNF grammars:

- In [llama-server](../tools/server)'s completion endpoints, passed as the `grammar` body field
- In [llama-cli](../tools/main), passed as the `--grammar` & `--grammar-file` flags
- In [llama-cli](../tools/cli) and [llama-completion](../tools/completion), passed as the `--grammar` & `--grammar-file` flags
- With [test-gbnf-validator](../tests/test-gbnf-validator.cpp), to test them against strings.

## JSON Schemas → GBNF

@ -145,7 +145,7 @@ You can use GBNF grammars:
- In [llama-server](../tools/server):
  - For any completion endpoints, passed as the `json_schema` body field
  - For the `/chat/completions` endpoint, passed inside the `response_format` body field (e.g. `{"type", "json_object", "schema": {"items": {}}}` or `{ type: "json_schema", json_schema: {"schema": ...} }`)
- In [llama-cli](../tools/main), passed as the `--json` / `-j` flag
- In [llama-cli](../tools/cli) and [llama-completion](../tools/completion), passed as the `--json` / `-j` flag
- To convert to a grammar ahead of time:
  - in CLI, with [examples/json_schema_to_grammar.py](../examples/json_schema_to_grammar.py)
  - in JavaScript with [json-schema-to-grammar.mjs](../tools/server/public_legacy/json-schema-to-grammar.mjs) (this is used by the [server](../tools/server)'s Web UI)
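For reference, a minimal sketch of passing a grammar to the CLI as described above; the model and prompt are placeholders, and `grammars/json.gbnf` refers to the grammar shipped in this directory:

```sh
# Constrain generation to the JSON grammar from this directory.
./llama-cli -m model.gguf --grammar-file grammars/json.gbnf -p "Describe the weather as JSON: "

# Or pass a tiny grammar inline; this one only allows the words "yes" or "no".
./llama-cli -m model.gguf --grammar 'root ::= "yes" | "no"' -p "Is the sky blue? "
```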
@ -0,0 +1,65 @@
#!/bin/sh
#

# Basedir on device
basedir=/data/local/tmp/llama.cpp

cli_opts=

branch=.
[ "$B" != "" ] && branch=$B

adbserial=
[ "$S" != "" ] && adbserial="-s $S"

model="gemma-3-4b-it-Q4_0.gguf"
[ "$M" != "" ] && model="$M"

mmproj="mmproj-F16.gguf"
[ "$MMPROJ" != "" ] && mmproj="$MMPROJ"

image=
[ "$IMG" != "" ] && image="$IMG"

device="HTP0"
[ "$D" != "" ] && device="$D"

verbose=
[ "$V" != "" ] && verbose="GGML_HEXAGON_VERBOSE=$V"

experimental="GGML_HEXAGON_EXPERIMENTAL=1"
[ "$E" != "" ] && experimental="GGML_HEXAGON_EXPERIMENTAL=$E"

sched=
[ "$SCHED" != "" ] && sched="GGML_SCHED_DEBUG=2" cli_opts="$cli_opts -v"

profile=
[ "$PROF" != "" ] && profile="GGML_HEXAGON_PROFILE=$PROF GGML_HEXAGON_OPSYNC=1"

opmask=
[ "$OPMASK" != "" ] && opmask="GGML_HEXAGON_OPMASK=$OPMASK"

nhvx=
[ "$NHVX" != "" ] && nhvx="GGML_HEXAGON_NHVX=$NHVX"

ndev=
[ "$NDEV" != "" ] && ndev="GGML_HEXAGON_NDEV=$NDEV"

# MTMD backend device for vision model (defaults to CPU if not set)
mtmd_backend=
[ "$MTMD_DEVICE" != "" ] && mtmd_backend="MTMD_BACKEND_DEVICE=$MTMD_DEVICE"

set -x

adb $adbserial shell " \
  cd $basedir; ulimit -c unlimited; \
  LD_LIBRARY_PATH=$basedir/$branch/lib \
  ADSP_LIBRARY_PATH=$basedir/$branch/lib \
  $verbose $experimental $sched $opmask $profile $nhvx $ndev $mtmd_backend \
  ./$branch/bin/llama-mtmd-cli --no-mmap -m $basedir/../gguf/$model \
    --mmproj $basedir/../gguf/$mmproj \
    --image $basedir/../gguf/$image \
    --poll 1000 -t 6 --cpu-mask 0xfc --cpu-strict 1 \
    --ctx-size 8192 --batch-size 128 -ctk q8_0 -ctv q8_0 -fa on \
    -ngl 99 --device $device -v $cli_opts $@ \
"
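A minimal usage sketch for the script above; the script file name and location are assumptions, while the `M`, `MMPROJ`, `IMG`, `S` and `D` variables come from the script itself:

```sh
# Run the vision CLI on the default HTP0 device with a specific image and ADB serial.
M=gemma-3-4b-it-Q4_0.gguf MMPROJ=mmproj-F16.gguf IMG=test.jpg D=HTP0 S=<adb-serial> \
    ./run-mtmd-cli.sh -p "Describe this image."
```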
@ -76,6 +76,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_JAIS,           "jais"           },
    { LLM_ARCH_NEMOTRON,       "nemotron"       },
    { LLM_ARCH_NEMOTRON_H,     "nemotron_h"     },
    { LLM_ARCH_NEMOTRON_H_MOE, "nemotron_h_moe" },
    { LLM_ARCH_EXAONE,         "exaone"         },
    { LLM_ARCH_EXAONE4,        "exaone4"        },
    { LLM_ARCH_RWKV6,          "rwkv6"          },

@ -1800,6 +1801,39 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_NEMOTRON_H_MOE,
        {
            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
            { LLM_TENSOR_OUTPUT,          "output" },
            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
            // mamba(2) ssm layers
            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
            { LLM_TENSOR_SSM_NORM,        "blk.%d.ssm_norm" },
            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
            // attention layers
            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
            // dense FFN
            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
            // MoE FFN (for MoE layers)
            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
            // MoE shared expert layer
            { LLM_TENSOR_FFN_DOWN_SHEXP,  "blk.%d.ffn_down_shexp" },
            { LLM_TENSOR_FFN_UP_SHEXP,    "blk.%d.ffn_up_shexp" },
        },
    },
    {
        LLM_ARCH_EXAONE,
        {

@ -2854,6 +2888,7 @@ bool llm_arch_is_hybrid(const llm_arch & arch) {
        case LLM_ARCH_LFM2:
        case LLM_ARCH_LFM2MOE:
        case LLM_ARCH_NEMOTRON_H:
        case LLM_ARCH_NEMOTRON_H_MOE:
        case LLM_ARCH_QWEN3NEXT:
            return true;
        default:

@ -80,6 +80,7 @@ enum llm_arch {
    LLM_ARCH_JAIS,
    LLM_ARCH_NEMOTRON,
    LLM_ARCH_NEMOTRON_H,
    LLM_ARCH_NEMOTRON_H_MOE,
    LLM_ARCH_EXAONE,
    LLM_ARCH_EXAONE4,
    LLM_ARCH_RWKV6,
@ -254,6 +254,24 @@ void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) {
    }
}

bool llm_graph_input_rs::can_reuse(const llm_graph_params & params) {
    const auto * mctx = static_cast<const llama_memory_recurrent_context *>(params.mctx);

    this->mctx = mctx;

    bool res = true;

    res &= s_copy->ne[0] == mctx->get_n_rs();

    res &= s_copy_main->ne[0]  == params.ubatch.n_seqs;
    res &= s_copy_extra->ne[0] == mctx->get_n_rs() - params.ubatch.n_seqs;

    res &= head == mctx->get_head();
    res &= rs_z == mctx->get_rs_z();

    return res;
}

void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

@ -461,8 +479,46 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
}

void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
    inp_attn->set_input(ubatch);
    inp_rs->set_input(ubatch);
    mctx->get_attn()->set_input_k_idxs(inp_attn->self_k_idxs, ubatch);
    mctx->get_attn()->set_input_v_idxs(inp_attn->self_v_idxs, ubatch);

    mctx->get_attn()->set_input_kq_mask(inp_attn->self_kq_mask, ubatch, cparams.causal_attn);

    const int64_t n_rs = mctx->get_recr()->get_n_rs();

    if (inp_rs->s_copy) {
        GGML_ASSERT(ggml_backend_buffer_is_host(inp_rs->s_copy->buffer));
        int32_t * data = (int32_t *) inp_rs->s_copy->data;

        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
        for (uint32_t i = 0; i < n_rs; ++i) {
            data[i] = mctx->get_recr()->s_copy(i);
        }
    }
}

bool llm_graph_input_mem_hybrid::can_reuse(const llm_graph_params & params) {
    const auto * mctx = static_cast<const llama_memory_hybrid_context *>(params.mctx);

    this->mctx = mctx;

    bool res = true;

    res &= inp_attn->self_k_idxs->ne[0] == params.ubatch.n_tokens;
    //res &= inp_attn->self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there

    res &= inp_attn->self_kq_mask->ne[0] == mctx->get_attn()->get_n_kv();
    res &= inp_attn->self_kq_mask->ne[1] == params.ubatch.n_tokens;

    res &= inp_rs->s_copy->ne[0] == mctx->get_recr()->get_n_rs();

    res &= inp_rs->s_copy_main->ne[0]  == params.ubatch.n_seqs;
    res &= inp_rs->s_copy_extra->ne[0] == mctx->get_recr()->get_n_rs() - params.ubatch.n_seqs;

    res &= inp_rs->head == mctx->get_recr()->get_head();
    res &= inp_rs->rs_z == mctx->get_recr()->get_rs_z();

    return res;
}

//

@ -1089,6 +1145,15 @@ ggml_tensor * llm_graph_context::build_moe_ffn(
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_moe_relu", il);
            } break;
        case LLM_FFN_RELU_SQR:
            if (gate_exps) {
                // TODO: add support for gated squared relu
                GGML_ABORT("fatal error: gated squared relu not implemented");
            } else {
                cur = ggml_relu(ctx0, cur);
                cur = ggml_sqr(ctx0, cur);
                cb(cur, "ffn_moe_relu_sqr", il);
            } break;
        default:
            GGML_ABORT("fatal error");
    }

@ -1841,6 +1906,9 @@ static std::unique_ptr<llm_graph_input_rs> build_rs_inp_impl(
    inp->s_copy_main  = ggml_view_1d(ctx0, inp->s_copy, n_seqs, 0);
    inp->s_copy_extra = ggml_view_1d(ctx0, inp->s_copy, n_rs - n_seqs, n_seqs * inp->s_copy->nb[0]);

    inp->head = mctx_cur->get_head();
    inp->rs_z = mctx_cur->get_rs_z();

    return inp;
}

@ -1909,10 +1977,10 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const {
    const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx);

    auto inp_rs = build_rs_inp_impl(ctx0, ubatch, mctx_cur->get_recr());
    auto inp_rs   = build_rs_inp_impl (ctx0, ubatch, mctx_cur->get_recr());
    auto inp_attn = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn());

    auto inp = std::make_unique<llm_graph_input_mem_hybrid>(std::move(inp_attn), std::move(inp_rs), mctx_cur);
    auto inp = std::make_unique<llm_graph_input_mem_hybrid>(cparams, std::move(inp_attn), std::move(inp_rs), mctx_cur);

    return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp));
}

@ -225,6 +225,8 @@ public:
    void set_input(const llama_ubatch * ubatch) override;

    bool can_reuse(const llm_graph_params & params) override;

    ggml_tensor * s_copy; // I32 [n_rs]

    // views of s_copy, computed once per graph

@ -233,6 +235,10 @@ public:
    ggml_tensor * s_copy_extra; // I32 [n_rs - n_seqs]

    const llama_memory_recurrent_context * mctx;

    // used in view offsets, need to match for valid graph reuse
    uint32_t head;
    int32_t  rs_z;
};

class llm_graph_input_cross_embd : public llm_graph_input_i {

@ -365,22 +371,28 @@ public:
class llm_graph_input_mem_hybrid : public llm_graph_input_i {
public:
    llm_graph_input_mem_hybrid(
            const llama_cparams & cparams,
            std::unique_ptr<llm_graph_input_attn_kv> inp_attn,
            std::unique_ptr<llm_graph_input_rs> inp_rs,
            const llama_memory_hybrid_context * mctx) :
            std::unique_ptr<llm_graph_input_rs>      inp_rs,
            const llama_memory_hybrid_context *      mctx) :
        inp_attn(std::move(inp_attn)),
        inp_rs(std::move(inp_rs)),
        cparams(cparams),
        mctx(mctx) { }
    virtual ~llm_graph_input_mem_hybrid() = default;

    void set_input(const llama_ubatch * ubatch) override;

    bool can_reuse(const llm_graph_params & params) override;

    std::unique_ptr<llm_graph_input_attn_kv> inp_attn;
    std::unique_ptr<llm_graph_input_rs>      inp_rs;

    llm_graph_input_attn_kv * get_attn() const { return inp_attn.get(); }
    llm_graph_input_rs      * get_recr() const { return inp_rs.get(); }

    const llama_cparams cparams;

    const llama_memory_hybrid_context * mctx;
};
@ -2,6 +2,7 @@

#include "ggml.h"

#include <algorithm>
#include <cassert>

void llama_hparams::set_swa_pattern(uint32_t n_pattern, bool dense_first) {
@@ -1569,9 +1569,11 @@ void llama_kv_cache::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama

        const uint32_t strm = seq_id == -1 ? s : seq_to_stream[seq_id];

        slot_info sinfo;

        bool res = true;
        res = res && state_read_meta(io, strm, cell_count, seq_id);
        res = res && state_read_data(io, strm, cell_count);
        res = res && state_read_meta(io, strm, cell_count, sinfo, seq_id);
        res = res && state_read_data(io, strm, cell_count, sinfo);

        if (!res) {
            if (seq_id == -1) {
@@ -1710,7 +1712,7 @@ void llama_kv_cache::state_write_data(llama_io_write_i & io, const cell_ranges_t
    }
}

bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, llama_seq_id dest_seq_id) {
bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, slot_info & sinfo, llama_seq_id dest_seq_id) {
    auto & cells = v_cells[strm];
    auto & head = v_heads[strm];
@@ -1747,7 +1749,7 @@ bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32
            ubatch.seq_id[i] = &dest_seq_id;
        }

        const auto sinfo = find_slot(ubatch, true);
        sinfo = find_slot(ubatch, false);
        if (sinfo.empty()) {
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return false;
@@ -1757,20 +1759,16 @@ bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32
        // see: https://github.com/ggml-org/llama.cpp/pull/16825#issuecomment-3460868350
        apply_ubatch(sinfo, ubatch);

        const auto head_cur = sinfo.head();
        LLAMA_LOG_DEBUG("%s: cell_count = %d, dest_seq_id = %d\n", __func__, cell_count, dest_seq_id);

        // keep the head at the old position because we will read the KV data into it in state_read_data()
        head = head_cur;

        LLAMA_LOG_DEBUG("%s: head_cur = %d, head = %d, cell_count = %d, dest_seq_id = %d\n", __func__, head_cur, head, cell_count, dest_seq_id);

        // DEBUG CHECK: head_cur should be our first cell, head_cur + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(head_cur + cell_count <= cells.size());
        GGML_ASSERT(cells.pos_get(head_cur) == ubatch.pos[0]);
        GGML_ASSERT(cells.pos_get(head_cur + cell_count - 1) == ubatch.pos[cell_count - 1]);
        GGML_ASSERT(cells.seq_has(head_cur, dest_seq_id));
        GGML_ASSERT(cells.seq_has(head_cur + cell_count - 1, dest_seq_id));
        // DEBUG CHECK: verify that all cells were allocated and have correct seq_id and pos values
        GGML_ASSERT(sinfo.n_stream() == 1);
        GGML_ASSERT(sinfo.idxs[0].size() == cell_count);
        for (uint32_t i = 0; i < cell_count; ++i) {
            const uint32_t idx = sinfo.idxs[0][i];
            GGML_ASSERT(cells.pos_get(idx) == ubatch.pos[i]);
            GGML_ASSERT(cells.seq_has(idx, dest_seq_id));
        }
    } else {
        // whole KV cache restore
@@ -1803,15 +1801,24 @@ bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32
            }
        }

        // Create contiguous slot_info for whole cache restore
        sinfo.s0 = strm;
        sinfo.s1 = strm;
        sinfo.resize(1);
        sinfo.strm[0] = strm;
        sinfo.idxs[0].resize(cell_count);
        for (uint32_t i = 0; i < cell_count; ++i) {
            sinfo.idxs[0][i] = i;
        }

        head = 0;
    }

    return true;
}

bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count) {
bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, const slot_info & sinfo) {
    auto & cells = v_cells[strm];
    auto & head = v_heads[strm];

    uint32_t v_trans;
    uint32_t n_layer;
@@ -1861,8 +1868,17 @@ bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32
        }

        if (cell_count) {
            // Read and set the keys for the whole cell range
            ggml_backend_tensor_set(k, io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
            if (sinfo.is_contiguous()) {
                // Fast path: contiguous cells, single memcpy
                ggml_backend_tensor_set(k, io.read(cell_count * k_size_row), sinfo.head() * k_size_row, cell_count * k_size_row);
            } else {
                // Slow path: scatter to non-contiguous positions
                const void * src = io.read(cell_count * k_size_row);
                for (uint32_t i = 0; i < cell_count; ++i) {
                    const size_t dst_offset = sinfo.idxs[0][i] * k_size_row;
                    ggml_backend_tensor_set(k, (const char*)src + i * k_size_row, dst_offset, k_size_row);
                }
            }
        }
    }
@@ -1893,8 +1909,17 @@ bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32
        }

        if (cell_count) {
            // Read and set the values for the whole cell range
            ggml_backend_tensor_set(v, io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
            if (sinfo.is_contiguous()) {
                // Fast path: contiguous cells, single memcpy
                ggml_backend_tensor_set(v, io.read(cell_count * v_size_row), sinfo.head() * v_size_row, cell_count * v_size_row);
            } else {
                // Slow path: scatter to non-contiguous positions
                const void * src = io.read(cell_count * v_size_row);
                for (uint32_t i = 0; i < cell_count; ++i) {
                    const size_t dst_offset = sinfo.idxs[0][i] * v_size_row;
                    ggml_backend_tensor_set(v, (const char*)src + i * v_size_row, dst_offset, v_size_row);
                }
            }
        }
    } else {
@@ -1933,10 +1958,22 @@ bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    const size_t dst_offset = (head + j * cells.size()) * v_size_el;
                    ggml_backend_tensor_set(v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
                if (sinfo.is_contiguous()) {
                    // Fast path: contiguous cells
                    const uint32_t h = sinfo.head();
                    for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                        const size_t dst_offset = (h + j * cells.size()) * v_size_el;
                        ggml_backend_tensor_set(v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
                    }
                } else {
                    // Slow path: scatter to non-contiguous positions
                    for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                        const void * src = io.read(cell_count * v_size_el);
                        for (uint32_t i = 0; i < cell_count; ++i) {
                            const size_t dst_offset = (sinfo.idxs[0][i] + j * cells.size()) * v_size_el;
                            ggml_backend_tensor_set(v, (const char*)src + i * v_size_el, dst_offset, v_size_el);
                        }
                    }
                }
            }
        }
@@ -72,6 +72,23 @@ public:
        void clear() {
            idxs.clear();
        }

        // check if indices are contiguous starting from head()
        bool is_contiguous() const {
            if (idxs.empty() || idxs[0].empty()) {
                return true;
            }
            if (idxs.size() > 1) {
                return false;
            }
            const uint32_t h = idxs[0][0];
            for (size_t i = 0; i < idxs[0].size(); ++i) {
                if (idxs[0][i] != h + i) {
                    return false;
                }
            }
            return true;
        }
    };

    using slot_info_vec_t = std::vector<slot_info>;
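For intuition, here is a minimal standalone sketch of the same contiguity rule outside the `slot_info` class. The `check_contiguous` helper below is illustrative only and not part of the patch; it simply restates the check that decides between the single-write fast path and the per-cell scatter path above.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative re-statement of slot_info::is_contiguous(): a single stream whose
// indices form an unbroken run starting at the first index counts as contiguous.
static bool check_contiguous(const std::vector<uint32_t> & idxs) {
    if (idxs.empty()) {
        return true;
    }
    for (size_t i = 0; i < idxs.size(); ++i) {
        if (idxs[i] != idxs[0] + i) {
            return false;
        }
    }
    return true;
}

int main() {
    printf("%d\n", check_contiguous({5, 6, 7, 8})); // 1 -> fast path (single tensor write)
    printf("%d\n", check_contiguous({5, 7, 8}));    // 0 -> slow path (per-cell scatter)
    return 0;
}
```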
@@ -264,8 +281,8 @@ private:
    void state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id = -1) const;
    void state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const;

    bool state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
    bool state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count);
    bool state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, slot_info & sinfo, llama_seq_id dest_seq_id = -1);
    bool state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, const slot_info & sinfo);
};

class llama_kv_cache_context : public llama_memory_context_i {
@@ -222,7 +222,7 @@ llama_memory_hybrid_context::llama_memory_hybrid_context(
|
|||
ubatches(std::move(ubatches)),
|
||||
// note: here we copy the ubatches. not sure if this is ideal
|
||||
ctx_attn(new llama_kv_cache_context(mem->get_mem_attn(), std::move(sinfos_attn), this->ubatches)),
|
||||
ctx_recr(new llama_memory_recurrent_context(mem->get_mem_recr(), this->ubatches)),
|
||||
ctx_recr(new llama_memory_recurrent_context(mem->get_mem_recr(), this->ubatches)),
|
||||
status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -120,6 +120,7 @@ const char * llm_type_name(llm_type type) {
|
|||
case LLM_TYPE_16B_A1B: return "16B.A1B";
|
||||
case LLM_TYPE_21B_A3B: return "21B.A3B";
|
||||
case LLM_TYPE_30B_A3B: return "30B.A3B";
|
||||
case LLM_TYPE_31B_A3_5B: return "31B.A3.5B";
|
||||
case LLM_TYPE_80B_A3B: return "80B.A3B";
|
||||
case LLM_TYPE_100B_A6B: return "100B.A6B";
|
||||
case LLM_TYPE_106B_A12B: return "106B.A12B";
|
||||
|
|
@@ -1802,6 +1803,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
|
|||
}
|
||||
} break;
|
||||
case LLM_ARCH_NEMOTRON_H:
|
||||
case LLM_ARCH_NEMOTRON_H_MOE:
|
||||
{
|
||||
ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
|
||||
ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
|
||||
|
|
@@ -1817,7 +1819,14 @@ void llama_model::load_hparams(llama_model_loader & ml) {
|
|||
|
||||
ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
|
||||
|
||||
ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
|
||||
ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
|
||||
ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared, false);
|
||||
ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
|
||||
ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale, false);
|
||||
|
||||
switch (hparams.n_layer) {
|
||||
case 52: type = LLM_TYPE_31B_A3_5B; break; // Nemotron-H_MOE 31B
|
||||
case 56: type = LLM_TYPE_9B; break;
|
||||
default: type = LLM_TYPE_UNKNOWN;
|
||||
}
|
||||
|
|
@@ -3393,9 +3402,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
|
|||
layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
|
||||
|
||||
// optional bias tensors
|
||||
layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
|
||||
layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
|
||||
layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
|
||||
layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
|
||||
layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
|
||||
layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
|
||||
|
||||
layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
|
||||
|
||||
|
|
@@ -5195,6 +5204,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
|
|||
}
|
||||
} break;
|
||||
case LLM_ARCH_NEMOTRON_H:
|
||||
case LLM_ARCH_NEMOTRON_H_MOE:
|
||||
{
|
||||
// mamba2 Mixer SSM params
|
||||
// NOTE: int64_t for tensor dimensions
|
||||
|
|
@@ -5205,6 +5215,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
|
|||
const int64_t n_group = hparams.ssm_n_group;
|
||||
const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_ssm_head;
|
||||
|
||||
const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
|
||||
const int64_t n_ff_shexp = hparams.n_ff_shexp;
|
||||
|
||||
// embeddings
|
||||
tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
|
||||
|
||||
|
|
@@ -5254,12 +5267,26 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
|
|||
layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_k_gqa_i}, TENSOR_NOT_REQUIRED);
|
||||
layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_v_gqa_i}, TENSOR_NOT_REQUIRED);
|
||||
layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
|
||||
} else {
|
||||
// mlp layers
|
||||
layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { hparams.n_ff(i), n_embd}, 0);
|
||||
layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, hparams.n_ff(i)}, 0);
|
||||
layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
|
||||
layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {hparams.n_ff(i)}, TENSOR_NOT_REQUIRED);
|
||||
} else {
|
||||
if (n_expert != 0) {
|
||||
layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert}, 0);
|
||||
layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert }, 0);
|
||||
|
||||
// MoE branch
|
||||
layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
|
||||
layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
|
||||
|
||||
// Shared expert branch
|
||||
layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
|
||||
layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_shexp}, 0);
|
||||
|
||||
} else {
|
||||
// mlp layers
|
||||
layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { hparams.n_ff(i), n_embd}, 0);
|
||||
layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, hparams.n_ff(i)}, 0);
|
||||
layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
|
||||
layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {hparams.n_ff(i)}, TENSOR_NOT_REQUIRED);
|
||||
}
|
||||
}
|
||||
}
|
||||
} break;
|
||||
|
|
@@ -6886,7 +6913,8 @@ void llama_model::print_info() const {
|
|||
arch == LLM_ARCH_PLAMO2 ||
|
||||
arch == LLM_ARCH_GRANITE_HYBRID ||
|
||||
arch == LLM_ARCH_QWEN3NEXT ||
|
||||
arch == LLM_ARCH_NEMOTRON_H) {
|
||||
arch == LLM_ARCH_NEMOTRON_H ||
|
||||
arch == LLM_ARCH_NEMOTRON_H_MOE) {
|
||||
LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv);
|
||||
LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
|
||||
LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
|
||||
|
|
@@ -6941,7 +6969,8 @@ void llama_model::print_info() const {
|
|||
if (arch == LLM_ARCH_MINICPM ||
|
||||
arch == LLM_ARCH_GRANITE ||
|
||||
arch == LLM_ARCH_GRANITE_MOE ||
|
||||
arch == LLM_ARCH_GRANITE_HYBRID) {
|
||||
arch == LLM_ARCH_GRANITE_HYBRID ||
|
||||
arch == LLM_ARCH_NEMOTRON_H_MOE) {
|
||||
LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
|
||||
LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale);
|
||||
LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
|
||||
|
|
@@ -7122,7 +7151,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
|
|||
if (arch == LLM_ARCH_FALCON_H1) {
|
||||
filter_attn = [&](int32_t) { return true; };
|
||||
filter_recr = [&](int32_t) { return true; };
|
||||
} else if (arch == LLM_ARCH_NEMOTRON_H) {
|
||||
} else if (arch == LLM_ARCH_NEMOTRON_H || arch == LLM_ARCH_NEMOTRON_H_MOE) {
|
||||
filter_attn = [&](int32_t il) {
|
||||
return !hparams.is_recurrent(il) && hparams.n_ff(il) == 0;
|
||||
};
|
||||
|
|
@@ -7494,6 +7523,7 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
|
|||
llm = std::make_unique<llm_build_nemotron>(*this, params);
|
||||
} break;
|
||||
case LLM_ARCH_NEMOTRON_H:
|
||||
case LLM_ARCH_NEMOTRON_H_MOE:
|
||||
{
|
||||
llm = std::make_unique<llm_build_nemotron_h>(*this, params);
|
||||
} break;
|
||||
|
|
@@ -7778,6 +7808,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
|
|||
case LLM_ARCH_ARWKV7:
|
||||
case LLM_ARCH_WAVTOKENIZER_DEC:
|
||||
case LLM_ARCH_NEMOTRON_H:
|
||||
case LLM_ARCH_NEMOTRON_H_MOE:
|
||||
return LLAMA_ROPE_TYPE_NONE;
|
||||
|
||||
// use what we call a normal RoPE, operating on pairs of consecutive head values
|
||||
|
|
|
|||
|
|
@@ -113,6 +113,7 @@ enum llm_type {
|
|||
LLM_TYPE_16B_A1B,
|
||||
LLM_TYPE_21B_A3B, // Ernie MoE small
|
||||
LLM_TYPE_30B_A3B,
|
||||
LLM_TYPE_31B_A3_5B,
|
||||
LLM_TYPE_80B_A3B, // Qwen3 Next
|
||||
LLM_TYPE_100B_A6B,
|
||||
LLM_TYPE_106B_A12B, // GLM-4.5-Air
|
||||
|
|
|
|||
|
|
@@ -1895,7 +1895,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
|
|||
clean_spaces = false;
|
||||
} else if (
|
||||
tokenizer_pre == "qwen2" ||
|
||||
tokenizer_pre == "deepseek-r1-qwen") {
|
||||
tokenizer_pre == "deepseek-r1-qwen" ||
|
||||
tokenizer_pre == "kormo") {
|
||||
pre_type = LLAMA_VOCAB_PRE_TYPE_QWEN2;
|
||||
clean_spaces = false;
|
||||
} else if (
|
||||
|
|
|
|||
|
|
@@ -107,12 +107,41 @@ ggml_tensor * llm_build_nemotron_h::build_attention_layer(ggml_tensor *
|
|||
}
|
||||
|
||||
ggml_tensor * llm_build_nemotron_h::build_ffn_layer(ggml_tensor * cur, const llama_model & model, const int il) {
|
||||
cur = build_ffn(cur,
|
||||
model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
|
||||
NULL, NULL, NULL,
|
||||
model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
|
||||
NULL, LLM_FFN_RELU_SQR, LLM_FFN_PAR, il);
|
||||
cb(cur, "ffn_out", il);
|
||||
if (model.layers[il].ffn_gate_inp == nullptr) {
|
||||
cur = build_ffn(cur,
|
||||
model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
|
||||
NULL, NULL, NULL,
|
||||
model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
|
||||
NULL,
|
||||
LLM_FFN_RELU_SQR, LLM_FFN_PAR, il);
|
||||
cb(cur, "ffn_out", il);
|
||||
} else {
|
||||
ggml_tensor * ffn_inp = cur;
|
||||
ggml_tensor * moe_out =
|
||||
build_moe_ffn(ffn_inp,
|
||||
model.layers[il].ffn_gate_inp,
|
||||
model.layers[il].ffn_up_exps,
|
||||
nullptr, // no gate
|
||||
model.layers[il].ffn_down_exps,
|
||||
model.layers[il].ffn_exp_probs_b,
|
||||
n_expert, n_expert_used,
|
||||
LLM_FFN_RELU_SQR, hparams.expert_weights_norm,
|
||||
true, hparams.expert_weights_scale,
|
||||
LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID,
|
||||
il);
|
||||
cb(moe_out, "ffn_moe_out", il);
|
||||
|
||||
ggml_tensor * ffn_shexp = build_ffn(ffn_inp,
|
||||
model.layers[il].ffn_up_shexp, NULL, NULL,
|
||||
NULL /* no gate */ , NULL, NULL,
|
||||
model.layers[il].ffn_down_shexp, NULL, NULL,
|
||||
NULL,
|
||||
LLM_FFN_RELU_SQR, LLM_FFN_PAR, il);
|
||||
cb(ffn_shexp, "ffn_shexp", il);
|
||||
|
||||
cur = ggml_add(ctx0, moe_out, ffn_shexp);
|
||||
cb(cur, "ffn_out", il);
|
||||
}
|
||||
|
||||
cur = build_cvec(cur, il);
|
||||
cb(cur, "l_out", il);
|
||||
|
|
|
|||
|
|
@@ -31,16 +31,25 @@ llm_build_qwen2::llm_build_qwen2(const llama_model & model, const llm_graph_para
|
|||
{
|
||||
// compute Q and K and RoPE them
|
||||
ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
|
||||
Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
|
||||
cb(Qcur, "Qcur", il);
|
||||
if (model.layers[il].bq) {
|
||||
Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
|
||||
cb(Qcur, "Qcur", il);
|
||||
}
|
||||
|
||||
ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
|
||||
Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
|
||||
cb(Kcur, "Kcur", il);
|
||||
if (model.layers[il].bk) {
|
||||
Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
|
||||
cb(Kcur, "Kcur", il);
|
||||
}
|
||||
|
||||
ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
|
||||
Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
|
||||
cb(Vcur, "Vcur", il);
|
||||
if (model.layers[il].bv) {
|
||||
Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
|
||||
cb(Vcur, "Vcur", il);
|
||||
}
|
||||
|
||||
Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
|
||||
Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
|
||||
|
|
|
|||
|
|
@@ -222,6 +222,14 @@ llama_build_and_test(test-backend-ops.cpp)
llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
llama_build_and_test(test-autorelease.cpp LABEL "model")

# Test for state restore with fragmented KV cache
# Requires a model, uses same args pattern as test-thread-safety
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
    llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf)
else()
    llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -hf ggml-org/models -hff tinyllamas/stories15M-be.Q4_0.gguf)
endif()

if (NOT GGML_BACKEND_DL)
    # these tests use the backends directly and cannot be built with dynamic loading
    llama_build_and_test(test-barrier.cpp)
|
|
|||
|
|
@@ -0,0 +1,122 @@
|
|||
// Test for state restore with fragmented KV cache
|
||||
// This tests the fix for: https://github.com/ggml-org/llama.cpp/issues/17527
|
||||
// The issue was that state restore required contiguous KV cache slots,
|
||||
// which fails when the cache is fragmented.
|
||||
//
|
||||
// The fix changes find_slot(ubatch, true) to find_slot(ubatch, false)
|
||||
// in state_read_meta(), allowing non-contiguous slot allocation.
|
||||
|
||||
#include "arg.h"
|
||||
#include "common.h"
|
||||
#include "llama.h"
|
||||
|
||||
#include <vector>
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
common_params params;
|
||||
|
||||
params.sampling.seed = 1234;
|
||||
params.kv_unified = true;
|
||||
params.n_parallel = 3;
|
||||
params.n_ctx = 256;
|
||||
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
common_init();
|
||||
|
||||
// init
|
||||
common_init_result_ptr llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init->model();
|
||||
llama_context * ctx = llama_init->context();
|
||||
|
||||
if (model == nullptr || ctx == nullptr) {
|
||||
fprintf(stderr, "%s : failed to init\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
GGML_UNUSED(model);
|
||||
|
||||
// tokenize prompt
|
||||
std::vector<llama_token> tokens(70, 1);
|
||||
|
||||
// interleave the 3 sequences:
|
||||
// 01201230123...
|
||||
llama_batch batch = llama_batch_init(params.n_parallel*tokens.size(), 0, 1);
|
||||
for (size_t i = 0; i < tokens.size(); i++) {
|
||||
for (int s = 0; s < params.n_parallel; ++s) {
|
||||
common_batch_add(batch, tokens[i], i, {s}, false);
|
||||
}
|
||||
}
|
||||
batch.logits[batch.n_tokens - 1] = true;
|
||||
|
||||
if (llama_decode(ctx, batch)) {
|
||||
fprintf(stderr, "%s : failed to decode seq 0\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s : processed prompt on seq 0, 1, 2 (%zu tokens each)\n", __func__, tokens.size());
|
||||
|
||||
// Save state of seq 1
|
||||
std::vector<uint8_t> seq_state(llama_state_seq_get_size(ctx, 1));
|
||||
const size_t ncopy = llama_state_seq_get_data(ctx, seq_state.data(), seq_state.size(), 1);
|
||||
if (ncopy != seq_state.size()) {
|
||||
fprintf(stderr, "%s : failed to save seq 1 state\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
fprintf(stderr, "%s : saved seq 1 state, %zu bytes\n", __func__, ncopy);
|
||||
|
||||
// clear seq 1 to create a "hole" in the KV cache (fragmentation)
|
||||
// 0.20.20.20.2....
|
||||
llama_memory_t mem = llama_get_memory(ctx);
|
||||
llama_memory_seq_rm(mem, 1, -1, -1);
|
||||
fprintf(stderr, "%s : cleared seq 1 to create fragmentation\n", __func__);
|
||||
|
||||
// Now the cache has holes where seq 1 was
|
||||
// This creates fragmentation - there's no contiguous block large enough
|
||||
// for the seq 1 state if we only look for contiguous slots
|
||||
|
||||
// Restore seq 1 state into seq 1 (should work with non-contiguous allocation)
|
||||
// We use seq 1 since it's a valid sequence ID (0 to n_parallel-1)
|
||||
// Before the fix, this would fail with "failed to find available cells in kv cache"
|
||||
const size_t nset = llama_state_seq_set_data(ctx, seq_state.data(), seq_state.size(), 1);
|
||||
if (nset != seq_state.size()) {
|
||||
fprintf(stderr, "%s : FAILED to restore seq state into fragmented cache (got %zu, expected %zu)\n",
|
||||
__func__, nset, seq_state.size());
|
||||
fprintf(stderr, "%s : This is the bug - state restore fails with fragmented KV cache\n", __func__);
|
||||
llama_batch_free(batch);
|
||||
return 1;
|
||||
}
|
||||
fprintf(stderr, "%s : restored state into seq 1, %zu bytes\n", __func__, nset);
|
||||
|
||||
// Verify we can decode with the restored state
|
||||
// Generate one token to verify the restored state is usable
|
||||
auto sparams = llama_sampler_chain_default_params();
|
||||
llama_sampler * smpl = llama_sampler_chain_init(sparams);
|
||||
llama_sampler_chain_add(smpl, llama_sampler_init_dist(params.sampling.seed));
|
||||
|
||||
auto next_token = llama_sampler_sample(smpl, ctx, -1);
|
||||
auto next_token_str = common_token_to_piece(ctx, next_token);
|
||||
|
||||
common_batch_clear(batch);
|
||||
common_batch_add(batch, next_token, (int)tokens.size(), {1}, true);
|
||||
|
||||
if (llama_decode(ctx, batch)) {
|
||||
fprintf(stderr, "%s : failed to decode with restored state\n", __func__);
|
||||
llama_sampler_free(smpl);
|
||||
llama_batch_free(batch);
|
||||
return 1;
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s : successfully decoded with restored state, generated: '%s'\n", __func__, next_token_str.c_str());
|
||||
fprintf(stderr, "%s : SUCCESS - state restore works with fragmented KV cache\n", __func__);
|
||||
|
||||
llama_sampler_free(smpl);
|
||||
llama_batch_free(batch);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@@ -0,0 +1 @@
|
|||
TODO
|
||||
|
|
@@ -1,4 +1,4 @@
|
|||
# llama.cpp/tools/main
|
||||
# llama.cpp/tools/completion
|
||||
|
||||
This example program allows you to use various LLaMA language models easily and efficiently. It is specifically designed to work with the [llama.cpp](https://github.com/ggml-org/llama.cpp) project, which provides a plain C/C++ implementation with optional 4-bit quantization support for faster, lower memory inference, and is optimized for desktop CPUs. This program can be used to perform various inference tasks with LLaMA models, including generating text based on user-provided prompts and chat-like interactions with reverse prompts.
|
||||
|
||||
|
|
@@ -27,64 +27,64 @@ Once downloaded, place your model in the models folder in llama.cpp.
|
|||
##### Input prompt (One-and-done)
|
||||
|
||||
```bash
|
||||
./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf -no-cnv --prompt "Once upon a time"
|
||||
./llama-completion -m models/gemma-1.1-7b-it.Q4_K_M.gguf -no-cnv --prompt "Once upon a time"
|
||||
```
|
||||
##### Conversation mode (Allow for continuous interaction with the model)
|
||||
|
||||
```bash
|
||||
./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --chat-template gemma
|
||||
./llama-completion -m models/gemma-1.1-7b-it.Q4_K_M.gguf --chat-template gemma
|
||||
```
|
||||
|
||||
##### Conversation mode using built-in jinja chat template
|
||||
|
||||
```bash
|
||||
./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --jinja
|
||||
./llama-completion -m models/gemma-1.1-7b-it.Q4_K_M.gguf --jinja
|
||||
```
|
||||
|
||||
##### One-and-done query using jinja with custom system prompt and a starting prompt
|
||||
|
||||
```bash
|
||||
./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --jinja --single-turn -sys "You are a helpful assistant" -p "Hello"
|
||||
./llama-completion -m models/gemma-1.1-7b-it.Q4_K_M.gguf --jinja --single-turn -sys "You are a helpful assistant" -p "Hello"
|
||||
```
|
||||
|
||||
##### Infinite text from a starting prompt (you can use `Ctrl-C` to stop it):
|
||||
```bash
|
||||
./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
|
||||
./llama-completion -m models/gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
|
||||
```
|
||||
|
||||
### Windows:
|
||||
|
||||
##### Input prompt (One-and-done)
|
||||
```powershell
|
||||
./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf -no-cnv --prompt "Once upon a time"
|
||||
./llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf -no-cnv --prompt "Once upon a time"
|
||||
```
|
||||
##### Conversation mode (Allow for continuous interaction with the model)
|
||||
|
||||
```powershell
|
||||
./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --chat-template gemma
|
||||
./llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --chat-template gemma
|
||||
```
|
||||
|
||||
##### Conversation mode using built-in jinja chat template
|
||||
|
||||
```powershell
|
||||
./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --jinja
|
||||
./llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --jinja
|
||||
```
|
||||
|
||||
##### One-and-done query using jinja with custom system prompt and a starting prompt
|
||||
|
||||
```powershell
|
||||
./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --jinja --single-turn -sys "You are a helpful assistant" -p "Hello"
|
||||
./llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --jinja --single-turn -sys "You are a helpful assistant" -p "Hello"
|
||||
```
|
||||
|
||||
#### Infinite text from a starting prompt (you can use `Ctrl-C` to stop it):
|
||||
|
||||
```powershell
|
||||
llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
|
||||
llama-completion.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
|
||||
```
|
||||
|
||||
## Common Options
|
||||
|
||||
In this section, we cover the most commonly used options for running the `llama-cli` program with the LLaMA models:
|
||||
In this section, we cover the most commonly used options for running the `llama-completion` program with the LLaMA models:
|
||||
|
||||
- `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/gemma-1.1-7b-it.Q4_K_M.gguf`; inferred from `--model-url` if set).
|
||||
- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g [https://huggingface.co/ggml-org/gemma-1.1-7b-it-Q4_K_M-GGUF/resolve/main/gemma-1.1-7b-it.Q4_K_M.gguf?download=true](https://huggingface.co/ggml-org/gemma-1.1-7b-it-Q4_K_M-GGUF/resolve/main/gemma-1.1-7b-it.Q4_K_M.gguf?download=true)).
|
||||
|
|
@@ -97,7 +97,7 @@ In this section, we cover the most commonly used options for running the `llama-
|
|||
|
||||
## Input Prompts
|
||||
|
||||
The `llama-cli` program provides several ways to interact with the LLaMA models using input prompts:
|
||||
The `llama-completion` program provides several ways to interact with the LLaMA models using input prompts:
|
||||
|
||||
- `--prompt PROMPT`: Provide a prompt directly as a command-line option.
|
||||
- `--file FNAME`: Provide a file containing a prompt or multiple prompts.
|
||||
|
|
@@ -107,7 +107,7 @@ The `llama-cli` program provides several ways to interact with the LLaMA models
|
|||
|
||||
## Interaction
|
||||
|
||||
The `llama-cli` program offers a seamless way to interact with LLaMA models, allowing users to engage in real-time conversations or provide instructions for specific tasks. The interactive mode can be triggered using various options, including `--interactive` and `--interactive-first`.
|
||||
The `llama-completion` program offers a seamless way to interact with LLaMA models, allowing users to engage in real-time conversations or provide instructions for specific tasks. The interactive mode can be triggered using various options, including `--interactive` and `--interactive-first`.
|
||||
|
||||
In interactive mode, users can participate in text generation by injecting their input during the process. Users can press `Ctrl+C` at any time to interject and type their input, followed by pressing `Return` to submit it to the LLaMA model. To submit additional lines without finalizing input, users can end the current line with a backslash (`\`) and continue typing.
|
||||
|
||||
|
|
@@ -136,7 +136,7 @@ To overcome this limitation, you can use the `--in-prefix` flag to add a space o
|
|||
The `--in-prefix` flag is used to add a prefix to your input, primarily, this is used to insert a space after the reverse prompt. Here's an example of how to use the `--in-prefix` flag in conjunction with the `--reverse-prompt` flag:
|
||||
|
||||
```sh
|
||||
./llama-cli -r "User:" --in-prefix " "
|
||||
./llama-completion -r "User:" --in-prefix " "
|
||||
```
|
||||
|
||||
### In-Suffix
|
||||
|
|
@@ -144,7 +144,7 @@ The `--in-prefix` flag is used to add a prefix to your input, primarily, this is
|
|||
The `--in-suffix` flag is used to add a suffix after your input. This is useful for adding an "Assistant:" prompt after the user's input. It's added after the new-line character (`\n`) that's automatically added to the end of the user's input. Here's an example of how to use the `--in-suffix` flag in conjunction with the `--reverse-prompt` flag:
|
||||
|
||||
```sh
|
||||
./llama-cli -r "User:" --in-prefix " " --in-suffix "Assistant:"
|
||||
./llama-completion -r "User:" --in-prefix " " --in-suffix "Assistant:"
|
||||
```
|
||||
When --in-prefix or --in-suffix options are enabled the chat template ( --chat-template ) is disabled
|
||||
|
||||
|
|
|
|||
|
|
@@ -80,7 +80,7 @@ Each test is repeated the number of times given by `-r`, and the results are ave
|
|||
|
||||
Using the `-d <n>` option, each test can be run at a specified context depth, prefilling the KV cache with `<n>` tokens.
|
||||
|
||||
For a description of the other options, see the [main example](../main/README.md).
|
||||
For a description of the other options, see the [completion example](../completion/README.md).
|
||||
|
||||
> [!NOTE]
|
||||
> The measurements with `llama-bench` do not include the times for tokenization and for sampling.
|
||||
|
|
|
|||
|
|
@@ -70,6 +70,13 @@ struct clip_hparams {
|
|||
int32_t n_mel_bins = 0; // whisper preprocessor
|
||||
int32_t proj_stack_factor = 0; // ultravox
|
||||
|
||||
// audio-to-mel preprocessor params
|
||||
int32_t audio_chunk_len = -1; // in seconds
|
||||
int32_t audio_sample_rate = -1;
|
||||
int32_t audio_n_fft = -1;
|
||||
int32_t audio_window_len = -1;
|
||||
int32_t audio_hop_len = -1;
|
||||
|
||||
// legacy
|
||||
bool has_llava_projector = false;
|
||||
int minicpmv_version = 0;
|
||||
|
|
@@ -323,3 +330,5 @@ struct clip_model {
|
|||
|| proj_type == PROJECTOR_TYPE_VOXTRAL;
|
||||
}
|
||||
};
|
||||
|
||||
const clip_hparams * clip_get_hparams(const struct clip_ctx * ctx);
|
||||
|
|
|
|||
|
|
@@ -1174,11 +1174,15 @@ struct clip_model_loader {
|
|||
model.proj_type == PROJECTOR_TYPE_VOXTRAL ||
|
||||
model.proj_type == PROJECTOR_TYPE_GLMA;
|
||||
get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
|
||||
if (hparams.n_mel_bins != 128) {
|
||||
throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
|
||||
}
|
||||
hparams.ffn_op = FFN_GELU_ERF;
|
||||
log_ffn_op = "gelu_erf"; // temporary solution for logging
|
||||
|
||||
// audio preprocessing params
|
||||
hparams.audio_chunk_len = 30; // in seconds
|
||||
hparams.audio_sample_rate = 16000;
|
||||
hparams.audio_n_fft = 400;
|
||||
hparams.audio_window_len = 400;
|
||||
hparams.audio_hop_len = 160;
|
||||
} break;
|
||||
case PROJECTOR_TYPE_DEEPSEEKOCR:
|
||||
{
|
||||
|
|
@@ -1227,6 +1231,11 @@ struct clip_model_loader {
|
|||
LOG_INF("\n--- audio hparams ---\n");
|
||||
LOG_INF("%s: n_mel_bins: %d\n", __func__, hparams.n_mel_bins);
|
||||
LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
|
||||
LOG_INF("%s: audio_chunk_len: %d\n", __func__, hparams.audio_chunk_len);
|
||||
LOG_INF("%s: audio_sample_rate: %d\n", __func__, hparams.audio_sample_rate);
|
||||
LOG_INF("%s: audio_n_fft: %d\n", __func__, hparams.audio_n_fft);
|
||||
LOG_INF("%s: audio_window_len: %d\n", __func__, hparams.audio_window_len);
|
||||
LOG_INF("%s: audio_hop_len: %d\n", __func__, hparams.audio_hop_len);
|
||||
}
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
|
||||
|
|
@@ -3983,3 +3992,7 @@ void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel
|
|||
batch->entries.push_back(clip_image_f32_ptr(audio));
|
||||
batch->is_audio = true;
|
||||
}
|
||||
|
||||
const clip_hparams * clip_get_hparams(const struct clip_ctx * ctx) {
|
||||
return &ctx->model.hparams;
|
||||
}
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@@ -1,6 +1,7 @@
|
|||
#pragma once
|
||||
|
||||
#include "ggml.h"
|
||||
#include "clip-model.h"
|
||||
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
|
|
@@ -8,18 +9,7 @@
|
|||
|
||||
#define MTMD_INTERNAL_HEADER
|
||||
|
||||
#define WHISPER_ASSERT GGML_ASSERT
|
||||
|
||||
#define WHISPER_SAMPLE_RATE 16000
|
||||
#define WHISPER_N_FFT 400
|
||||
#define WHISPER_HOP_LENGTH 160
|
||||
#define WHISPER_CHUNK_SIZE 30
|
||||
|
||||
#define COMMON_SAMPLE_RATE 16000
|
||||
|
||||
namespace whisper_preprocessor {
|
||||
|
||||
struct whisper_mel {
|
||||
struct mtmd_audio_mel {
|
||||
int n_len;
|
||||
int n_len_org;
|
||||
int n_mel;
|
||||
|
|
@@ -27,23 +17,18 @@ struct whisper_mel {
|
|||
std::vector<float> data;
|
||||
};
|
||||
|
||||
struct whisper_filters {
|
||||
int32_t n_mel;
|
||||
int32_t n_fft;
|
||||
struct mtmd_audio_preprocessor {
|
||||
const clip_hparams & hparams;
|
||||
|
||||
std::vector<float> data;
|
||||
mtmd_audio_preprocessor(const clip_ctx * ctx): hparams(*clip_get_hparams(ctx)) {}
|
||||
|
||||
virtual ~mtmd_audio_preprocessor() = default;
|
||||
virtual void initialize() = 0; // NOT thread-safe
|
||||
virtual bool preprocess(const float * samples, size_t n_samples, std::vector<mtmd_audio_mel> & output) = 0;
|
||||
};
|
||||
|
||||
bool preprocess_audio(
|
||||
const float * samples,
|
||||
size_t n_samples,
|
||||
const whisper_filters & filters,
|
||||
std::vector<whisper_mel> & output);
|
||||
|
||||
} // namespace whisper_preprocessor
|
||||
|
||||
namespace whisper_precalc_filters {
|
||||
|
||||
whisper_preprocessor::whisper_filters get_128_bins();
|
||||
|
||||
} // namespace whisper_precalc_filters
|
||||
struct mtmd_audio_preprocessor_whisper : mtmd_audio_preprocessor {
|
||||
mtmd_audio_preprocessor_whisper(const clip_ctx * ctx) : mtmd_audio_preprocessor(ctx) {}
|
||||
void initialize() override;
|
||||
bool preprocess(const float * samples, size_t n_samples, std::vector<mtmd_audio_mel> & output) override;
|
||||
};
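As a rough illustration of how the new interface is meant to be extended: a hypothetical additional audio frontend would subclass `mtmd_audio_preprocessor` and read its parameters from the shared `clip_hparams`. The sketch below is an assumption for illustration only (the `mtmd_audio_preprocessor_custom` name and its body are not part of this change; it presumes inclusion of this header). It would also need a corresponding case in the projector switch in mtmd.cpp, mirroring how `mtmd_audio_preprocessor_whisper` is instantiated there.

```cpp
// Hypothetical sketch only: a new audio frontend subclasses mtmd_audio_preprocessor,
// reads its parameters from the shared clip_hparams, and emits mel chunks into `output`.
struct mtmd_audio_preprocessor_custom : mtmd_audio_preprocessor {
    mtmd_audio_preprocessor_custom(const clip_ctx * ctx) : mtmd_audio_preprocessor(ctx) {}

    void initialize() override {
        // e.g. precompute filter banks from hparams.audio_n_fft and hparams.n_mel_bins
    }

    bool preprocess(const float * samples, size_t n_samples, std::vector<mtmd_audio_mel> & output) override {
        // split `samples` into hparams.audio_chunk_len-second windows and append one
        // mtmd_audio_mel per window to `output`; return false on malformed input
        (void) samples; (void) n_samples; (void) output;
        return true;
    }
};
```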
|
||||
|
|
|
|||
|
|
@@ -151,8 +151,7 @@ struct mtmd_context {
|
|||
// string template for slice image delimiters with row/col (idefics3)
|
||||
std::string sli_img_start_tmpl;
|
||||
|
||||
// for whisper, we pre-calculate the mel filter bank
|
||||
whisper_preprocessor::whisper_filters w_filters;
|
||||
std::unique_ptr<mtmd_audio_preprocessor> audio_preproc;
|
||||
|
||||
// TODO @ngxson : add timings
|
||||
|
||||
|
|
@@ -317,14 +316,25 @@ struct mtmd_context {
|
|||
GGML_ASSERT(ctx_a != nullptr);
|
||||
projector_type proj = clip_get_projector_type(ctx_a);
|
||||
|
||||
if (clip_has_whisper_encoder(ctx_a)) {
|
||||
// TODO @ngxson : check if model n_mel is 128 or 80
|
||||
w_filters = whisper_precalc_filters::get_128_bins();
|
||||
}
|
||||
|
||||
LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n"
|
||||
" https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__);
|
||||
|
||||
// set preprocessor
|
||||
switch (proj) {
|
||||
case PROJECTOR_TYPE_QWEN2A:
|
||||
case PROJECTOR_TYPE_QWEN25O:
|
||||
case PROJECTOR_TYPE_ULTRAVOX:
|
||||
case PROJECTOR_TYPE_VOXTRAL:
|
||||
audio_preproc = std::make_unique<mtmd_audio_preprocessor_whisper>(ctx_a);
|
||||
break;
|
||||
default:
|
||||
GGML_ABORT("unsupported audio projector type");
|
||||
}
|
||||
|
||||
// initialize audio preprocessor
|
||||
audio_preproc->initialize();
|
||||
|
||||
// set special tokens
|
||||
if (proj == PROJECTOR_TYPE_QWEN2A) {
|
||||
// <|audio_bos|> ... (embeddings) ... <|audio_eos|>
|
||||
aud_beg = "<|audio_bos|>";
|
||||
|
|
@@ -653,11 +663,10 @@ struct mtmd_tokenizer {
|
|||
}
|
||||
|
||||
// preprocess audio
|
||||
GGML_ASSERT(ctx->w_filters.n_mel); // make sure we have filter preloaded
|
||||
std::vector<whisper_preprocessor::whisper_mel> mel_spec_chunks;
|
||||
std::vector<mtmd_audio_mel> mel_spec_chunks;
|
||||
const float * samples = (const float *)bitmap->data.data();
|
||||
size_t n_samples = bitmap->data.size() / sizeof(float);
|
||||
bool ok = whisper_preprocessor::preprocess_audio(samples, n_samples, ctx->w_filters, mel_spec_chunks);
|
||||
bool ok = ctx->audio_preproc->preprocess(samples, n_samples, mel_spec_chunks);
|
||||
if (!ok) {
|
||||
LOG_ERR("Unable to preprocess audio\n");
|
||||
return 2;
|
||||
|
|
@@ -864,8 +873,7 @@ int mtmd_get_audio_bitrate(mtmd_context * ctx) {
|
|||
if (!ctx->ctx_a) {
|
||||
return -1;
|
||||
}
|
||||
// for now, we assume that all audio models have the same bitrate
|
||||
return 16000; // 16kHz
|
||||
return clip_get_hparams(ctx->ctx_a)->audio_sample_rate;
|
||||
}
|
||||
|
||||
//
|
||||
|
|
|
|||
Binary file not shown.
|
|
@@ -619,11 +619,12 @@ flowchart TB
|
|||
|
||||
### Test Types
|
||||
|
||||
| Type          | Tool               | Location                         | Command             |
| ------------- | ------------------ | -------------------------------- | ------------------- |
| **E2E**       | Playwright         | `tests/e2e/`                     | `npm run test:e2e`  |
| **Unit**      | Vitest             | `tests/client/`, `tests/server/` | `npm run test:unit` |
| **UI/Visual** | Storybook + Vitest | `tests/stories/`                 | `npm run test:ui`   |
| Type          | Tool               | Location         | Command             |
| ------------- | ------------------ | ---------------- | ------------------- |
| **Unit**      | Vitest             | `tests/unit/`    | `npm run test:unit` |
| **UI/Visual** | Storybook + Vitest | `tests/stories/` | `npm run test:ui`   |
| **E2E**       | Playwright         | `tests/e2e/`     | `npm run test:e2e`  |
| **Client**    | Vitest             | `tests/client/`  | `npm run test:unit` |

### Running Tests
|
||||
|
||||
|
|
|
|||
|
|
@@ -13,12 +13,11 @@
|
|||
"reset": "rm -rf .svelte-kit node_modules",
|
||||
"format": "prettier --write .",
|
||||
"lint": "prettier --check . && eslint .",
|
||||
"test": "npm run test:ui -- --run && npm run test:client -- --run && npm run test:server -- --run && npm run test:e2e",
|
||||
"test": "npm run test:ui -- --run && npm run test:client -- --run && npm run test:unit -- --run && npm run test:e2e",
|
||||
"test:e2e": "playwright test",
|
||||
"test:client": "vitest --project=client",
|
||||
"test:server": "vitest --project=server",
|
||||
"test:unit": "vitest --project=unit",
|
||||
"test:ui": "vitest --project=ui",
|
||||
"test:unit": "vitest",
|
||||
"storybook": "storybook dev -p 6006",
|
||||
"build-storybook": "storybook build",
|
||||
"cleanup": "rm -rf .svelte-kit build node_modules test-results"
|
||||
|
|
|
|||
|
|
@@ -241,7 +241,7 @@
|
|||
</div>
|
||||
{/if}
|
||||
{:else if (isText || (isPdf && pdfViewMode === 'text')) && displayTextContent}
|
||||
<SyntaxHighlightedCode code={displayTextContent} {language} maxWidth="69rem" />
|
||||
<SyntaxHighlightedCode code={displayTextContent} {language} maxWidth="calc(69rem - 2rem)" />
|
||||
{:else if isAudio}
|
||||
<div class="flex items-center justify-center p-8">
|
||||
<div class="w-full max-w-md text-center">
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,6 @@
|
|||
<script lang="ts">
|
||||
import { RemoveButton } from '$lib/components/app';
|
||||
import { getFileTypeLabel, getPreviewText, formatFileSize, isTextFile } from '$lib/utils';
|
||||
import { formatFileSize, getFileTypeLabel, getPreviewText, isTextFile } from '$lib/utils';
|
||||
import { AttachmentType } from '$lib/enums';
|
||||
|
||||
interface Props {
|
||||
|
|
|
|||
|
|
@@ -24,7 +24,7 @@
|
|||
MimeTypeImage,
|
||||
MimeTypeText
|
||||
} from '$lib/enums';
|
||||
import { isIMEComposing } from '$lib/utils';
|
||||
import { isIMEComposing, parseClipboardContent } from '$lib/utils';
|
||||
import {
|
||||
AudioRecorder,
|
||||
convertToWav,
|
||||
|
|
@@ -191,7 +191,6 @@
|
|||
|
||||
if ((!message.trim() && uploadedFiles.length === 0) || disabled || isLoading) return;
|
||||
|
||||
// Check if model is selected first
|
||||
if (!checkModelSelected()) return;
|
||||
|
||||
const messageToSend = message.trim();
|
||||
|
|
@@ -228,6 +227,31 @@
|
|||
|
||||
const text = event.clipboardData.getData(MimeTypeText.PLAIN);
|
||||
|
||||
if (text.startsWith('"')) {
|
||||
const parsed = parseClipboardContent(text);
|
||||
|
||||
if (parsed.textAttachments.length > 0) {
|
||||
event.preventDefault();
|
||||
|
||||
message = parsed.message;
|
||||
|
||||
const attachmentFiles = parsed.textAttachments.map(
|
||||
(att) =>
|
||||
new File([att.content], att.name, {
|
||||
type: MimeTypeText.PLAIN
|
||||
})
|
||||
);
|
||||
|
||||
onFileUpload?.(attachmentFiles);
|
||||
|
||||
setTimeout(() => {
|
||||
textareaRef?.focus();
|
||||
}, 10);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (
|
||||
text.length > 0 &&
|
||||
pasteLongTextToFileLength > 0 &&
|
||||
|
|
|
|||
|
|
@@ -35,7 +35,7 @@
|
|||
|
||||
<div class="flex items-center gap-1 {className}">
|
||||
<DropdownMenu.Root>
|
||||
<DropdownMenu.Trigger name="Attach files">
|
||||
<DropdownMenu.Trigger name="Attach files" {disabled}>
|
||||
<Tooltip.Root>
|
||||
<Tooltip.Trigger>
|
||||
<Button
|
||||
|
|
|
|||
|
|
@@ -173,6 +173,7 @@
|
|||
/>
|
||||
|
||||
<ModelsSelector
|
||||
{disabled}
|
||||
bind:this={selectorModelRef}
|
||||
currentModel={conversationModel}
|
||||
forceForegroundText={true}
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,7 @@
|
|||
<script lang="ts">
|
||||
import { chatStore } from '$lib/stores/chat.svelte';
|
||||
import { copyToClipboard, isIMEComposing } from '$lib/utils';
|
||||
import { config } from '$lib/stores/settings.svelte';
|
||||
import { copyToClipboard, isIMEComposing, formatMessageForClipboard } from '$lib/utils';
|
||||
import ChatMessageAssistant from './ChatMessageAssistant.svelte';
|
||||
import ChatMessageUser from './ChatMessageUser.svelte';
|
||||
import ChatMessageSystem from './ChatMessageSystem.svelte';
|
||||
|
|
@@ -87,7 +88,9 @@
|
|||
}
|
||||
|
||||
async function handleCopy() {
|
||||
await copyToClipboard(message.content, 'Message copied to clipboard');
|
||||
const asPlainText = Boolean(config().copyTextAttachmentsAsPlainText);
|
||||
const clipboardContent = formatMessageForClipboard(message.content, message.extra, asPlainText);
|
||||
await copyToClipboard(clipboardContent, 'Message copied to clipboard');
|
||||
onCopy?.(message);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -57,6 +57,11 @@
|
|||
label: 'Paste long text to file length',
|
||||
type: 'input'
|
||||
},
|
||||
{
|
||||
key: 'copyTextAttachmentsAsPlainText',
|
||||
label: 'Copy text attachments as plain text',
|
||||
type: 'checkbox'
|
||||
},
|
||||
{
|
||||
key: 'enableContinueGeneration',
|
||||
label: 'Enable "Continue" button',
|
||||
|
|
@@ -109,6 +114,16 @@
|
|||
key: 'disableAutoScroll',
|
||||
label: 'Disable automatic scroll',
|
||||
type: 'checkbox'
|
||||
},
|
||||
{
|
||||
key: 'alwaysShowSidebarOnDesktop',
|
||||
label: 'Always show sidebar on desktop',
|
||||
type: 'checkbox'
|
||||
},
|
||||
{
|
||||
key: 'autoShowSidebarOnNewChat',
|
||||
label: 'Auto-show sidebar on new chat',
|
||||
type: 'checkbox'
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
@ -404,7 +419,7 @@
|
|||
</div>
|
||||
|
||||
<!-- Mobile Header with Horizontal Scrollable Menu -->
|
||||
<div class="flex flex-col md:hidden">
|
||||
<div class="flex flex-col pt-6 md:hidden">
|
||||
<div class="border-b border-border/30 py-4">
|
||||
<!-- Horizontal Scrollable Category Menu with Navigation -->
|
||||
<div class="relative flex items-center" style="scroll-padding: 1rem;">
|
||||
|
|
|
|||
|
|
@@ -1,9 +1,11 @@
|
|||
<script lang="ts">
|
||||
import { Download, Upload } from '@lucide/svelte';
|
||||
import { Download, Upload, Trash2 } from '@lucide/svelte';
|
||||
import { Button } from '$lib/components/ui/button';
|
||||
import { DialogConversationSelection } from '$lib/components/app';
|
||||
import { createMessageCountMap } from '$lib/utils';
|
||||
import { conversationsStore, conversations } from '$lib/stores/conversations.svelte';
|
||||
import { toast } from 'svelte-sonner';
|
||||
import DialogConfirmation from '$lib/components/app/dialogs/DialogConfirmation.svelte';
|
||||
|
||||
let exportedConversations = $state<DatabaseConversation[]>([]);
|
||||
let importedConversations = $state<DatabaseConversation[]>([]);
|
||||
|
|
@@ -18,11 +20,14 @@
|
|||
[]
|
||||
);
|
||||
|
||||
// Delete functionality state
|
||||
let showDeleteDialog = $state(false);
|
||||
|
||||
async function handleExportClick() {
|
||||
try {
|
||||
const allConversations = conversations();
|
||||
if (allConversations.length === 0) {
|
||||
alert('No conversations to export');
|
||||
toast.info('No conversations to export');
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@@ -145,6 +150,36 @@
|
|||
alert('Failed to import conversations. Please check the file format.');
|
||||
}
|
||||
}
|
||||
|
||||
async function handleDeleteAllClick() {
|
||||
try {
|
||||
const allConversations = conversations();
|
||||
|
||||
if (allConversations.length === 0) {
|
||||
toast.info('No conversations to delete');
|
||||
return;
|
||||
}
|
||||
|
||||
showDeleteDialog = true;
|
||||
} catch (err) {
|
||||
console.error('Failed to load conversations for deletion:', err);
|
||||
toast.error('Failed to load conversations');
|
||||
}
|
||||
}
|
||||
|
||||
async function handleDeleteAllConfirm() {
|
||||
try {
|
||||
await conversationsStore.deleteAll();
|
||||
|
||||
showDeleteDialog = false;
|
||||
} catch (err) {
|
||||
console.error('Failed to delete conversations:', err);
|
||||
}
|
||||
}
|
||||
|
||||
function handleDeleteAllCancel() {
|
||||
showDeleteDialog = false;
|
||||
}
|
||||
</script>
|
||||
|
||||
<div class="space-y-6">
|
||||
|
|
@@ -229,6 +264,25 @@
|
|||
</div>
|
||||
{/if}
|
||||
</div>
|
||||
|
||||
<div class="grid border-t border-border/30 pt-4">
|
||||
<h4 class="mb-2 text-sm font-medium text-destructive">Delete All Conversations</h4>
|
||||
|
||||
<p class="mb-4 text-sm text-muted-foreground">
|
||||
Permanently delete all conversations and their messages. This action cannot be undone.
|
||||
Consider exporting your conversations first if you want to keep a backup.
|
||||
</p>
|
||||
|
||||
<Button
|
||||
class="text-destructive-foreground w-full justify-start justify-self-start bg-destructive hover:bg-destructive/80 md:w-auto"
|
||||
onclick={handleDeleteAllClick}
|
||||
variant="destructive"
|
||||
>
|
||||
<Trash2 class="mr-2 h-4 w-4" />
|
||||
|
||||
Delete all conversations
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
@@ -249,3 +303,15 @@
|
|||
onCancel={() => (showImportDialog = false)}
|
||||
onConfirm={handleImportConfirm}
|
||||
/>
|
||||
|
||||
<DialogConfirmation
|
||||
bind:open={showDeleteDialog}
|
||||
title="Delete all conversations"
|
||||
description="Are you sure you want to delete all conversations? This action cannot be undone and will permanently remove all your conversations and messages."
|
||||
confirmText="Delete All"
|
||||
cancelText="Cancel"
|
||||
variant="destructive"
|
||||
icon={Trash2}
|
||||
onConfirm={handleDeleteAllConfirm}
|
||||
onCancel={handleDeleteAllCancel}
|
||||
/>
|
||||
|
|
|
|||
|
|
@@ -9,6 +9,7 @@
|
|||
import Input from '$lib/components/ui/input/input.svelte';
|
||||
import { conversationsStore, conversations } from '$lib/stores/conversations.svelte';
|
||||
import { chatStore } from '$lib/stores/chat.svelte';
|
||||
import { getPreviewText } from '$lib/utils/text';
|
||||
import ChatSidebarActions from './ChatSidebarActions.svelte';
|
||||
|
||||
const sidebar = Sidebar.useSidebar();
|
||||
|
|
@@ -20,6 +21,9 @@
|
|||
let showEditDialog = $state(false);
|
||||
let selectedConversation = $state<DatabaseConversation | null>(null);
|
||||
let editedName = $state('');
|
||||
let selectedConversationNamePreview = $derived.by(() =>
|
||||
selectedConversation ? getPreviewText(selectedConversation.name) : ''
|
||||
);
|
||||
|
||||
let filteredConversations = $derived.by(() => {
|
||||
if (searchQuery.trim().length > 0) {
|
||||
|
|
@@ -162,7 +166,7 @@
|
|||
bind:open={showDeleteDialog}
|
||||
title="Delete Conversation"
|
||||
description={selectedConversation
|
||||
? `Are you sure you want to delete "${selectedConversation.name}"? This action cannot be undone and will permanently remove all messages in this conversation.`
|
||||
? `Are you sure you want to delete "${selectedConversationNamePreview}"? This action cannot be undone and will permanently remove all messages in this conversation.`
|
||||
: ''}
|
||||
confirmText="Delete"
|
||||
cancelText="Cancel"
|
||||
|
|
|
|||
|
|
@@ -504,6 +504,14 @@
|
|||
background: hsl(var(--muted) / 0.1);
|
||||
}
|
||||
|
||||
/* User message markdown should keep table borders visible on light primary backgrounds */
|
||||
div.markdown-user-content :global(table),
|
||||
div.markdown-user-content :global(th),
|
||||
div.markdown-user-content :global(td),
|
||||
div.markdown-user-content :global(.table-wrapper) {
|
||||
border-color: currentColor;
|
||||
}
|
||||
|
||||
/* Horizontal rules */
|
||||
div :global(hr) {
|
||||
border: none;
|
||||
|
|
@@ -642,6 +650,21 @@
|
|||
background: var(--muted);
|
||||
}
|
||||
|
||||
/* Disable hover effects when rendering user messages */
|
||||
.markdown-user-content :global(a),
|
||||
.markdown-user-content :global(a:hover) {
|
||||
color: var(--primary-foreground);
|
||||
}
|
||||
|
||||
.markdown-user-content :global(table:hover) {
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
.markdown-user-content :global(th:hover),
|
||||
.markdown-user-content :global(td:hover) {
|
||||
background: inherit;
|
||||
}
|
||||
|
||||
/* Enhanced blockquotes */
|
||||
div :global(blockquote) {
|
||||
transition: all 0.2s ease;
|
||||
|
|
|
|||
|
|
@@ -72,9 +72,10 @@
|
|||
|
||||
<div
|
||||
class="code-preview-wrapper overflow-auto rounded-lg border border-border bg-muted {className}"
|
||||
style="max-height: {maxHeight};"
|
||||
style="max-height: {maxHeight}; max-width: {maxWidth};"
|
||||
>
|
||||
<pre class="m-0 overflow-x-auto p-4 max-w-[{maxWidth}]"><code class="hljs text-sm leading-relaxed"
|
||||
<!-- Needs to be formatted as single line for proper rendering -->
|
||||
<pre class="m-0 overflow-x-auto p-4"><code class="hljs text-sm leading-relaxed"
|
||||
>{@html highlightedHtml}</code
|
||||
></pre>
|
||||
</div>
|
||||
|
|
|
|||
|
|
@@ -179,51 +179,37 @@
|
|||
});
|
||||
});
|
||||
|
||||
// Handle changes to the model selector pop-down or the model dialog, depending on if the server is in
|
||||
// router mode or not.
|
||||
function handleOpenChange(open: boolean) {
|
||||
if (loading || updating) return;
|
||||
|
||||
if (open) {
|
||||
isOpen = true;
|
||||
searchTerm = '';
|
||||
highlightedIndex = -1;
|
||||
if (isRouter) {
|
||||
if (open) {
|
||||
isOpen = true;
|
||||
searchTerm = '';
|
||||
highlightedIndex = -1;
|
||||
|
||||
// Focus search input after popover opens
|
||||
tick().then(() => {
|
||||
requestAnimationFrame(() => searchInputRef?.focus());
|
||||
});
|
||||
// Focus search input after popover opens
|
||||
tick().then(() => {
|
||||
requestAnimationFrame(() => searchInputRef?.focus());
|
||||
});
|
||||
|
||||
if (isRouter) {
|
||||
modelsStore.fetchRouterModels().then(() => {
|
||||
modelsStore.fetchModalitiesForLoadedModels();
|
||||
});
|
||||
} else {
|
||||
isOpen = false;
|
||||
searchTerm = '';
|
||||
highlightedIndex = -1;
|
||||
}
|
||||
} else {
|
||||
isOpen = false;
|
||||
searchTerm = '';
|
||||
highlightedIndex = -1;
|
||||
showModelDialog = open;
|
||||
}
|
||||
}
|
||||
|
||||
function handleTriggerClick() {
|
||||
if (loading || updating) return;
|
||||
|
||||
if (!isRouter) {
|
||||
// Single model mode: show dialog instead of popover
|
||||
showModelDialog = true;
|
||||
}
|
||||
// For router mode, the Popover handles open/close
|
||||
}
|
||||
|
||||
export function open() {
|
||||
if (isRouter) {
|
||||
handleOpenChange(true);
|
||||
} else {
|
||||
showModelDialog = true;
|
||||
}
|
||||
}
|
||||
|
||||
function closeMenu() {
|
||||
handleOpenChange(false);
|
||||
handleOpenChange(true);
|
||||
}
|
||||
|
||||
function handleSearchKeyDown(event: KeyboardEvent) {
|
||||
|
|
@@ -292,7 +278,7 @@
|
|||
}
|
||||
|
||||
if (shouldCloseMenu) {
|
||||
closeMenu();
|
||||
handleOpenChange(false);
|
||||
|
||||
// Focus the chat textarea after model selection
|
||||
requestAnimationFrame(() => {
|
||||
|
|
@@ -360,8 +346,181 @@
|
|||
{:else}
|
||||
{@const selectedOption = getDisplayOption()}
|
||||
|
||||
<Popover.Root bind:open={isOpen} onOpenChange={handleOpenChange}>
|
||||
<Popover.Trigger
|
||||
{#if isRouter}
|
||||
<Popover.Root bind:open={isOpen} onOpenChange={handleOpenChange}>
|
||||
<Popover.Trigger
|
||||
class={cn(
|
||||
`inline-flex cursor-pointer items-center gap-1.5 rounded-sm bg-muted-foreground/10 px-1.5 py-1 text-xs transition hover:text-foreground focus:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-60`,
|
||||
!isCurrentModelInCache()
|
||||
? 'bg-red-400/10 !text-red-400 hover:bg-red-400/20 hover:text-red-400'
|
||||
: forceForegroundText
|
||||
? 'text-foreground'
|
||||
: isHighlightedCurrentModelActive
|
||||
? 'text-foreground'
|
||||
: 'text-muted-foreground',
|
||||
isOpen ? 'text-foreground' : ''
|
||||
)}
|
||||
style="max-width: min(calc(100cqw - 6.5rem), 32rem)"
|
||||
disabled={disabled || updating}
|
||||
>
|
||||
<Package class="h-3.5 w-3.5" />
|
||||
|
||||
<span class="truncate font-medium">
|
||||
{selectedOption?.model || 'Select model'}
|
||||
</span>
|
||||
|
||||
{#if updating}
|
||||
<Loader2 class="h-3 w-3.5 animate-spin" />
|
||||
{:else}
|
||||
<ChevronDown class="h-3 w-3.5" />
|
||||
{/if}
|
||||
</Popover.Trigger>
|
||||
|
||||
<Popover.Content
|
||||
class="group/popover-content w-96 max-w-[calc(100vw-2rem)] p-0"
|
||||
align="end"
|
||||
sideOffset={8}
|
||||
collisionPadding={16}
|
||||
>
|
||||
<div class="flex max-h-[50dvh] flex-col overflow-hidden">
|
||||
<div
|
||||
class="order-1 shrink-0 border-b p-4 group-data-[side=top]/popover-content:order-2 group-data-[side=top]/popover-content:border-t group-data-[side=top]/popover-content:border-b-0"
|
||||
>
|
||||
<SearchInput
|
||||
id="model-search"
|
||||
placeholder="Search models..."
|
||||
bind:value={searchTerm}
|
||||
bind:ref={searchInputRef}
|
||||
onClose={() => handleOpenChange(false)}
|
||||
onKeyDown={handleSearchKeyDown}
|
||||
/>
|
||||
</div>
|
||||
<div
|
||||
class="models-list order-2 min-h-0 flex-1 overflow-y-auto group-data-[side=top]/popover-content:order-1"
|
||||
>
|
||||
{#if !isCurrentModelInCache() && currentModel}
|
||||
<!-- Show unavailable model as first option (disabled) -->
|
||||
<button
|
||||
type="button"
|
||||
class="flex w-full cursor-not-allowed items-center bg-red-400/10 px-4 py-2 text-left text-sm text-red-400"
|
||||
role="option"
|
||||
aria-selected="true"
|
||||
aria-disabled="true"
|
||||
disabled
|
||||
>
|
||||
<span class="truncate">{selectedOption?.name || currentModel}</span>
|
||||
<span class="ml-2 text-xs whitespace-nowrap opacity-70">(not available)</span>
|
||||
</button>
|
||||
<div class="my-1 h-px bg-border"></div>
|
||||
{/if}
|
||||
{#if filteredOptions.length === 0}
|
||||
<p class="px-4 py-3 text-sm text-muted-foreground">No models found.</p>
|
||||
{/if}
|
||||
{#each filteredOptions as option, index (option.id)}
|
||||
{@const status = getModelStatus(option.model)}
|
||||
{@const isLoaded = status === ServerModelStatus.LOADED}
|
||||
{@const isLoading = status === ServerModelStatus.LOADING}
|
||||
{@const isSelected = currentModel === option.model || activeId === option.id}
|
||||
{@const isCompatible = isModelCompatible(option)}
|
||||
{@const isHighlighted = index === highlightedIndex}
|
||||
{@const missingModalities = getMissingModalities(option)}
|
||||
|
||||
<div
|
||||
class={cn(
|
||||
'group flex w-full items-center gap-2 px-4 py-2 text-left text-sm transition focus:outline-none',
|
||||
isCompatible
|
||||
? 'cursor-pointer hover:bg-muted focus:bg-muted'
|
||||
: 'cursor-not-allowed opacity-50',
|
||||
isSelected || isHighlighted
|
||||
? 'bg-accent text-accent-foreground'
|
||||
: isCompatible
|
||||
? 'hover:bg-accent hover:text-accent-foreground'
|
||||
: '',
|
||||
isLoaded ? 'text-popover-foreground' : 'text-muted-foreground'
|
||||
)}
|
||||
role="option"
|
||||
aria-selected={isSelected || isHighlighted}
|
||||
aria-disabled={!isCompatible}
|
||||
tabindex={isCompatible ? 0 : -1}
|
||||
onclick={() => isCompatible && handleSelect(option.id)}
|
||||
onmouseenter={() => (highlightedIndex = index)}
|
||||
onkeydown={(e) => {
|
||||
if (isCompatible && (e.key === 'Enter' || e.key === ' ')) {
|
||||
e.preventDefault();
|
||||
handleSelect(option.id);
|
||||
}
|
||||
}}
|
||||
>
|
||||
<span class="min-w-0 flex-1 truncate">{option.model}</span>
|
||||
|
||||
{#if missingModalities}
|
||||
<span class="flex shrink-0 items-center gap-1 text-muted-foreground/70">
|
||||
{#if missingModalities.vision}
|
||||
<Tooltip.Root>
|
||||
<Tooltip.Trigger>
|
||||
<EyeOff class="h-3.5 w-3.5" />
|
||||
</Tooltip.Trigger>
|
||||
<Tooltip.Content class="z-[9999]">
|
||||
<p>No vision support</p>
|
||||
</Tooltip.Content>
|
||||
</Tooltip.Root>
|
||||
{/if}
|
||||
{#if missingModalities.audio}
|
||||
<Tooltip.Root>
|
||||
<Tooltip.Trigger>
|
||||
<MicOff class="h-3.5 w-3.5" />
|
||||
</Tooltip.Trigger>
|
||||
<Tooltip.Content class="z-[9999]">
|
||||
<p>No audio support</p>
|
||||
</Tooltip.Content>
|
||||
</Tooltip.Root>
|
||||
{/if}
|
||||
</span>
|
||||
{/if}
|
||||
|
||||
{#if isLoading}
|
||||
<Tooltip.Root>
|
||||
<Tooltip.Trigger>
|
||||
<Loader2 class="h-4 w-4 shrink-0 animate-spin text-muted-foreground" />
|
||||
</Tooltip.Trigger>
|
||||
<Tooltip.Content class="z-[9999]">
|
||||
<p>Loading model...</p>
|
||||
</Tooltip.Content>
|
||||
</Tooltip.Root>
|
||||
{:else if isLoaded}
|
||||
<Tooltip.Root>
|
||||
<Tooltip.Trigger>
|
||||
<button
|
||||
type="button"
|
||||
class="relative ml-2 flex h-4 w-4 shrink-0 items-center justify-center"
|
||||
onclick={(e) => {
|
||||
e.stopPropagation();
|
||||
modelsStore.unloadModel(option.model);
|
||||
}}
|
||||
>
|
||||
<span
|
||||
class="mr-2 h-2 w-2 rounded-full bg-green-500 transition-opacity group-hover:opacity-0"
|
||||
></span>
|
||||
<Power
|
||||
class="absolute mr-2 h-4 w-4 text-red-500 opacity-0 transition-opacity group-hover:opacity-100 hover:text-red-600"
|
||||
/>
|
||||
</button>
|
||||
</Tooltip.Trigger>
|
||||
<Tooltip.Content class="z-[9999]">
|
||||
<p>Unload model</p>
|
||||
</Tooltip.Content>
|
||||
</Tooltip.Root>
|
||||
{:else}
|
||||
<span class="mx-2 h-2 w-2 rounded-full bg-muted-foreground/50"></span>
|
||||
{/if}
|
||||
</div>
|
||||
{/each}
|
||||
</div>
|
||||
</div>
|
||||
</Popover.Content>
|
||||
</Popover.Root>
|
||||
{:else}
|
||||
<button
|
||||
class={cn(
|
||||
`inline-flex cursor-pointer items-center gap-1.5 rounded-sm bg-muted-foreground/10 px-1.5 py-1 text-xs transition hover:text-foreground focus:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-60`,
|
||||
!isCurrentModelInCache()
|
||||
|
|
@@ -374,165 +533,20 @@
isOpen ? 'text-foreground' : ''
)}
style="max-width: min(calc(100cqw - 6.5rem), 32rem)"
onclick={handleTriggerClick}
disabled={disabled || updating || !isRouter}
onclick={() => handleOpenChange(true)}
disabled={disabled || updating}
>
<Package class="h-3.5 w-3.5" />

<span class="truncate font-medium">
{selectedOption?.model || 'Select model'}
{selectedOption?.model}
</span>

{#if updating}
<Loader2 class="h-3 w-3.5 animate-spin" />
{:else if isRouter}
<ChevronDown class="h-3 w-3.5" />
{/if}
</Popover.Trigger>

<Popover.Content
class="group/popover-content w-96 max-w-[calc(100vw-2rem)] p-0"
align="end"
sideOffset={8}
collisionPadding={16}
>
<div class="flex max-h-[50dvh] flex-col overflow-hidden">
<div
class="order-1 shrink-0 border-b p-4 group-data-[side=top]/popover-content:order-2 group-data-[side=top]/popover-content:border-t group-data-[side=top]/popover-content:border-b-0"
>
<SearchInput
id="model-search"
placeholder="Search models..."
bind:value={searchTerm}
bind:ref={searchInputRef}
onClose={closeMenu}
onKeyDown={handleSearchKeyDown}
/>
</div>
<div
class="models-list order-2 min-h-0 flex-1 overflow-y-auto group-data-[side=top]/popover-content:order-1"
>
{#if !isCurrentModelInCache() && currentModel}
<!-- Show unavailable model as first option (disabled) -->
<button
type="button"
class="flex w-full cursor-not-allowed items-center bg-red-400/10 px-4 py-2 text-left text-sm text-red-400"
role="option"
aria-selected="true"
aria-disabled="true"
disabled
>
<span class="truncate">{selectedOption?.name || currentModel}</span>
<span class="ml-2 text-xs whitespace-nowrap opacity-70">(not available)</span>
</button>
<div class="my-1 h-px bg-border"></div>
{/if}
{#if filteredOptions.length === 0}
<p class="px-4 py-3 text-sm text-muted-foreground">No models found.</p>
{/if}
{#each filteredOptions as option, index (option.id)}
{@const status = getModelStatus(option.model)}
{@const isLoaded = status === ServerModelStatus.LOADED}
{@const isLoading = status === ServerModelStatus.LOADING}
{@const isSelected = currentModel === option.model || activeId === option.id}
{@const isCompatible = isModelCompatible(option)}
{@const isHighlighted = index === highlightedIndex}
{@const missingModalities = getMissingModalities(option)}

<div
class={cn(
'group flex w-full items-center gap-2 px-4 py-2 text-left text-sm transition focus:outline-none',
isCompatible
? 'cursor-pointer hover:bg-muted focus:bg-muted'
: 'cursor-not-allowed opacity-50',
isSelected || isHighlighted
? 'bg-accent text-accent-foreground'
: isCompatible
? 'hover:bg-accent hover:text-accent-foreground'
: '',
isLoaded ? 'text-popover-foreground' : 'text-muted-foreground'
)}
role="option"
aria-selected={isSelected || isHighlighted}
aria-disabled={!isCompatible}
tabindex={isCompatible ? 0 : -1}
onclick={() => isCompatible && handleSelect(option.id)}
onmouseenter={() => (highlightedIndex = index)}
onkeydown={(e) => {
if (isCompatible && (e.key === 'Enter' || e.key === ' ')) {
e.preventDefault();
handleSelect(option.id);
}
}}
>
<span class="min-w-0 flex-1 truncate">{option.model}</span>

{#if missingModalities}
<span class="flex shrink-0 items-center gap-1 text-muted-foreground/70">
{#if missingModalities.vision}
<Tooltip.Root>
<Tooltip.Trigger>
<EyeOff class="h-3.5 w-3.5" />
</Tooltip.Trigger>
<Tooltip.Content class="z-[9999]">
<p>No vision support</p>
</Tooltip.Content>
</Tooltip.Root>
{/if}
{#if missingModalities.audio}
<Tooltip.Root>
<Tooltip.Trigger>
<MicOff class="h-3.5 w-3.5" />
</Tooltip.Trigger>
<Tooltip.Content class="z-[9999]">
<p>No audio support</p>
</Tooltip.Content>
</Tooltip.Root>
{/if}
</span>
{/if}

{#if isLoading}
<Tooltip.Root>
<Tooltip.Trigger>
<Loader2 class="h-4 w-4 shrink-0 animate-spin text-muted-foreground" />
</Tooltip.Trigger>
<Tooltip.Content class="z-[9999]">
<p>Loading model...</p>
</Tooltip.Content>
</Tooltip.Root>
{:else if isLoaded}
<Tooltip.Root>
<Tooltip.Trigger>
<button
type="button"
class="relative ml-2 flex h-4 w-4 shrink-0 items-center justify-center"
onclick={(e) => {
e.stopPropagation();
modelsStore.unloadModel(option.model);
}}
>
<span
class="mr-2 h-2 w-2 rounded-full bg-green-500 transition-opacity group-hover:opacity-0"
></span>
<Power
class="absolute mr-2 h-4 w-4 text-red-500 opacity-0 transition-opacity group-hover:opacity-100 hover:text-red-600"
/>
</button>
</Tooltip.Trigger>
<Tooltip.Content class="z-[9999]">
<p>Unload model</p>
</Tooltip.Content>
</Tooltip.Root>
{:else}
<span class="mx-2 h-2 w-2 rounded-full bg-muted-foreground/50"></span>
{/if}
</div>
{/each}
</div>
</div>
</Popover.Content>
</Popover.Root>
</button>
{/if}
{/if}
</div>

@@ -12,9 +12,12 @@ export const SETTING_CONFIG_DEFAULT: Record<string, string | number | boolean> =
showMessageStats: true,
askForTitleConfirmation: false,
pasteLongTextToFileLen: 2500,
copyTextAttachmentsAsPlainText: false,
pdfAsImage: false,
disableAutoScroll: false,
renderUserContentAsMarkdown: false,
alwaysShowSidebarOnDesktop: false,
autoShowSidebarOnNewChat: true,
autoMicOnEmpty: false,
// make sure these default values are in sync with `common.h`
samplers: 'top_k;typ_p;top_p;min_p;temperature',

@@ -50,6 +53,8 @@ export const SETTING_CONFIG_INFO: Record<string, string> = {
'Choose the color theme for the interface. You can choose between System (follows your device settings), Light, or Dark.',
pasteLongTextToFileLen:
'On pasting long text, it will be converted to a file. You can control the file length by setting the value of this parameter. Value 0 means disable.',
copyTextAttachmentsAsPlainText:
'When copying a message with text attachments, combine them into a single plain text string instead of a special format that can be pasted back as attachments.',
samplers:
'The order at which samplers are applied, in simplified way. Default is "top_k;typ_p;top_p;min_p;temperature": top_k->typ_p->top_p->min_p->temperature',
temperature:

@@ -96,6 +101,10 @@ export const SETTING_CONFIG_INFO: Record<string, string> = {
disableAutoScroll:
'Disable automatic scrolling while messages stream so you can control the viewport position manually.',
renderUserContentAsMarkdown: 'Render user messages using markdown formatting in the chat.',
alwaysShowSidebarOnDesktop:
'Always keep the sidebar visible on desktop instead of auto-hiding it.',
autoShowSidebarOnNewChat:
'Automatically show sidebar when starting a new chat. Disable to keep the sidebar hidden until you click on it.',
autoMicOnEmpty:
'Automatically show microphone button instead of send button when textarea is empty for models with audio modality support.',
pyInterpreterEnabled:

@@ -385,8 +385,7 @@ class ConversationsStore {
this.conversations = this.conversations.filter((c) => c.id !== convId);

if (this.activeConversation?.id === convId) {
this.activeConversation = null;
this.activeMessages = [];
this.clearActiveConversation();
await goto(`?new_chat=true#/`);
}
} catch (error) {

@@ -394,6 +393,29 @@ class ConversationsStore {
}
}

/**
* Deletes all conversations and their messages
*/
async deleteAll(): Promise<void> {
try {
const allConversations = await DatabaseService.getAllConversations();

for (const conv of allConversations) {
await DatabaseService.deleteConversation(conv.id);
}

this.clearActiveConversation();
this.conversations = [];

toast.success('All conversations deleted');

await goto(`?new_chat=true#/`);
} catch (error) {
console.error('Failed to delete all conversations:', error);
toast.error('Failed to delete conversations');
}
}

// ─────────────────────────────────────────────────────────────────────────────
// Import/Export
// ─────────────────────────────────────────────────────────────────────────────

@@ -0,0 +1,262 @@
import { toast } from 'svelte-sonner';
import { AttachmentType } from '$lib/enums';
import type {
DatabaseMessageExtra,
DatabaseMessageExtraTextFile,
DatabaseMessageExtraLegacyContext
} from '$lib/types/database';

/**
* Copy text to clipboard with toast notification
* Uses modern clipboard API when available, falls back to legacy method for non-secure contexts
* @param text - Text to copy to clipboard
* @param successMessage - Custom success message (optional)
* @param errorMessage - Custom error message (optional)
* @returns Promise<boolean> - True if successful, false otherwise
*/
export async function copyToClipboard(
text: string,
successMessage = 'Copied to clipboard',
errorMessage = 'Failed to copy to clipboard'
): Promise<boolean> {
try {
// Try modern clipboard API first (secure contexts only)
if (navigator.clipboard && navigator.clipboard.writeText) {
await navigator.clipboard.writeText(text);
toast.success(successMessage);
return true;
}

// Fallback for non-secure contexts
const textArea = document.createElement('textarea');
textArea.value = text;
textArea.style.position = 'fixed';
textArea.style.left = '-999999px';
textArea.style.top = '-999999px';
document.body.appendChild(textArea);
textArea.focus();
textArea.select();

const successful = document.execCommand('copy');
document.body.removeChild(textArea);

if (successful) {
toast.success(successMessage);
return true;
} else {
throw new Error('execCommand failed');
}
} catch (error) {
console.error('Failed to copy to clipboard:', error);
toast.error(errorMessage);
return false;
}
}

/**
* Copy code with HTML entity decoding and toast notification
* @param rawCode - Raw code string that may contain HTML entities
* @param successMessage - Custom success message (optional)
* @param errorMessage - Custom error message (optional)
* @returns Promise<boolean> - True if successful, false otherwise
*/
export async function copyCodeToClipboard(
rawCode: string,
successMessage = 'Code copied to clipboard',
errorMessage = 'Failed to copy code'
): Promise<boolean> {
const doc = new DOMParser().parseFromString(rawCode, 'text/html');
const decodedCode = doc.body.textContent ?? rawCode;

return copyToClipboard(decodedCode, successMessage, errorMessage);
}

/**
* Format for text attachments when copied to clipboard
*/
export interface ClipboardTextAttachment {
type: typeof AttachmentType.TEXT;
name: string;
content: string;
}

/**
* Parsed result from clipboard content
*/
export interface ParsedClipboardContent {
message: string;
textAttachments: ClipboardTextAttachment[];
}

/**
* Formats a message with text attachments for clipboard copying.
*
* Default format (asPlainText = false):
* ```
* "Text message content"
* [
*   {"type":"TEXT","name":"filename.txt","content":"..."},
*   {"type":"TEXT","name":"another.txt","content":"..."}
* ]
* ```
*
* Plain text format (asPlainText = true):
* ```
* Text message content
*
* file content here
*
* another file content
* ```
*
* @param content - The message text content
* @param extras - Optional array of message attachments
* @param asPlainText - If true, format as plain text without JSON structure
* @returns Formatted string for clipboard
*/
export function formatMessageForClipboard(
content: string,
extras?: DatabaseMessageExtra[],
asPlainText: boolean = false
): string {
// Filter only text attachments (TEXT type and legacy CONTEXT type)
const textAttachments =
extras?.filter(
(extra): extra is DatabaseMessageExtraTextFile | DatabaseMessageExtraLegacyContext =>
extra.type === AttachmentType.TEXT || extra.type === AttachmentType.LEGACY_CONTEXT
) ?? [];

if (textAttachments.length === 0) {
return content;
}

if (asPlainText) {
const parts = [content];
for (const att of textAttachments) {
parts.push(att.content);
}
return parts.join('\n\n');
}

const clipboardAttachments: ClipboardTextAttachment[] = textAttachments.map((att) => ({
type: AttachmentType.TEXT,
name: att.name,
content: att.content
}));

return `${JSON.stringify(content)}\n${JSON.stringify(clipboardAttachments, null, 2)}`;
}

/**
* Parses clipboard content to extract message and text attachments.
* Supports both plain text and the special format with attachments.
*
* @param clipboardText - Raw text from clipboard
* @returns Parsed content with message and attachments
*/
export function parseClipboardContent(clipboardText: string): ParsedClipboardContent {
const defaultResult: ParsedClipboardContent = {
message: clipboardText,
textAttachments: []
};

if (!clipboardText.startsWith('"')) {
return defaultResult;
}

try {
let stringEndIndex = -1;
let escaped = false;

for (let i = 1; i < clipboardText.length; i++) {
const char = clipboardText[i];

if (escaped) {
escaped = false;
continue;
}

if (char === '\\') {
escaped = true;
continue;
}

if (char === '"') {
stringEndIndex = i;
break;
}
}

if (stringEndIndex === -1) {
return defaultResult;
}

const jsonStringPart = clipboardText.substring(0, stringEndIndex + 1);
const remainingPart = clipboardText.substring(stringEndIndex + 1).trim();

const message = JSON.parse(jsonStringPart) as string;

if (!remainingPart || !remainingPart.startsWith('[')) {
return {
message,
textAttachments: []
};
}

const attachments = JSON.parse(remainingPart) as unknown[];

const validAttachments: ClipboardTextAttachment[] = [];

for (const att of attachments) {
if (isValidTextAttachment(att)) {
validAttachments.push({
type: AttachmentType.TEXT,
name: att.name,
content: att.content
});
}
}

return {
message,
textAttachments: validAttachments
};
} catch {
return defaultResult;
}
}

/**
* Type guard to validate a text attachment object
* @param obj The object to validate
* @returns true if the object is a valid text attachment
*/
function isValidTextAttachment(
obj: unknown
): obj is { type: string; name: string; content: string } {
if (typeof obj !== 'object' || obj === null) {
return false;
}

const record = obj as Record<string, unknown>;

return (
(record.type === AttachmentType.TEXT || record.type === 'TEXT') &&
typeof record.name === 'string' &&
typeof record.content === 'string'
);
}

/**
* Checks if clipboard content contains our special format with attachments
* @param clipboardText - Raw text from clipboard
* @returns true if the clipboard content contains our special format with attachments
*/
export function hasClipboardAttachments(clipboardText: string): boolean {
if (!clipboardText.startsWith('"')) {
return false;
}

const parsed = parseClipboardContent(clipboardText);
return parsed.textAttachments.length > 0;
}

@@ -1,71 +0,0 @@
import { toast } from 'svelte-sonner';

/**
* Copy text to clipboard with toast notification
* Uses modern clipboard API when available, falls back to legacy method for non-secure contexts
* @param text - Text to copy to clipboard
* @param successMessage - Custom success message (optional)
* @param errorMessage - Custom error message (optional)
* @returns Promise<boolean> - True if successful, false otherwise
*/
export async function copyToClipboard(
text: string,
successMessage = 'Copied to clipboard',
errorMessage = 'Failed to copy to clipboard'
): Promise<boolean> {
try {
// Try modern clipboard API first (secure contexts only)
if (navigator.clipboard && navigator.clipboard.writeText) {
await navigator.clipboard.writeText(text);
toast.success(successMessage);
return true;
}

// Fallback for non-secure contexts
const textArea = document.createElement('textarea');
textArea.value = text;
textArea.style.position = 'fixed';
textArea.style.left = '-999999px';
textArea.style.top = '-999999px';
document.body.appendChild(textArea);
textArea.focus();
textArea.select();

const successful = document.execCommand('copy');
document.body.removeChild(textArea);

if (successful) {
toast.success(successMessage);
return true;
} else {
throw new Error('execCommand failed');
}
} catch (error) {
console.error('Failed to copy to clipboard:', error);
toast.error(errorMessage);
return false;
}
}

/**
* Copy code with HTML entity decoding and toast notification
* @param rawCode - Raw code string that may contain HTML entities
* @param successMessage - Custom success message (optional)
* @param errorMessage - Custom error message (optional)
* @returns Promise<boolean> - True if successful, false otherwise
*/
export async function copyCodeToClipboard(
rawCode: string,
successMessage = 'Code copied to clipboard',
errorMessage = 'Failed to copy code'
): Promise<boolean> {
// Decode HTML entities
const decodedCode = rawCode
.replace(/&amp;/g, '&')
.replace(/&lt;/g, '<')
.replace(/&gt;/g, '>')
.replace(/&quot;/g, '"')
.replace(/&#039;/g, "'");

return copyToClipboard(decodedCode, successMessage, errorMessage);
}

@@ -34,12 +34,3 @@ export function getFileTypeLabel(input: string | undefined): string {
// Handle AttachmentType or other plain strings
return input.toUpperCase();
}

/**
* Truncates text content for preview display
* @param content - The text content to truncate
* @returns Truncated content with ellipsis if needed
*/
export function getPreviewText(content: string): string {
return content.length > 150 ? content.substring(0, 150) + '...' : content;
}

@@ -40,10 +40,19 @@ export { setConfigValue, getConfigValue, configToParameterRecord } from './confi
export { createMessageCountMap, getMessageCount } from './conversation-utils';

// Clipboard utilities
export { copyToClipboard, copyCodeToClipboard } from './copy';
export {
copyToClipboard,
copyCodeToClipboard,
formatMessageForClipboard,
parseClipboardContent,
hasClipboardAttachments,
type ClipboardTextAttachment,
type ParsedClipboardContent
} from './clipboard';

// File preview utilities
export { getFileTypeLabel, getPreviewText } from './file-preview';
export { getFileTypeLabel } from './file-preview';
export { getPreviewText } from './text';

// File type utilities
export {

@@ -0,0 +1,7 @@
/**
* Returns a shortened preview of the provided content capped at the given length.
* Appends an ellipsis when the content exceeds the maximum.
*/
export function getPreviewText(content: string, max = 150): string {
return content.length > max ? content.slice(0, max) + '...' : content;
}

@@ -14,6 +14,7 @@
import { goto } from '$app/navigation';
import { modelsStore } from '$lib/stores/models.svelte';
import { TOOLTIP_DELAY_DURATION } from '$lib/constants/tooltip-config';
import { IsMobile } from '$lib/hooks/is-mobile.svelte';

let { children } = $props();

@@ -21,6 +22,10 @@
let isHomeRoute = $derived(page.route.id === '/');
let isNewChatMode = $derived(page.url.searchParams.get('new_chat') === 'true');
let showSidebarByDefault = $derived(activeMessages().length > 0 || isLoading());
let alwaysShowSidebarOnDesktop = $derived(config().alwaysShowSidebarOnDesktop);
let autoShowSidebarOnNewChat = $derived(config().autoShowSidebarOnNewChat);
let isMobile = new IsMobile();
let isDesktop = $derived(!isMobile.current);
let sidebarOpen = $state(false);
let innerHeight = $state<number | undefined>();
let chatSidebar:

@@ -76,6 +81,11 @@
}

$effect(() => {
if (alwaysShowSidebarOnDesktop && isDesktop) {
sidebarOpen = true;
return;
}

if (isHomeRoute && !isNewChatMode) {
// Auto-collapse sidebar when navigating to home route (but not in new chat mode)
sidebarOpen = false;

@@ -83,8 +93,11 @@
// Keep sidebar open in new chat mode
sidebarOpen = true;
} else if (isChatRoute) {
// On chat routes, show sidebar by default
sidebarOpen = true;
// On chat routes, only auto-show sidebar if setting is enabled
if (autoShowSidebarOnNewChat) {
sidebarOpen = true;
}
// If setting is disabled, don't change sidebar state - let user control it manually
} else {
// Other routes follow default behavior
sidebarOpen = showSidebarByDefault;

@@ -190,12 +203,14 @@
<ChatSidebar bind:this={chatSidebar} />
</Sidebar.Root>

<Sidebar.Trigger
class="transition-left absolute left-0 z-[900] h-8 w-8 duration-200 ease-linear {sidebarOpen
? 'md:left-[var(--sidebar-width)]'
: ''}"
style="translate: 1rem 1rem;"
/>
{#if !(alwaysShowSidebarOnDesktop && isDesktop)}
<Sidebar.Trigger
class="transition-left absolute left-0 z-[900] h-8 w-8 duration-200 ease-linear {sidebarOpen
? 'md:left-[var(--sidebar-width)]'
: ''}"
style="translate: 1rem 1rem;"
/>
{/if}

<Sidebar.Inset class="flex flex-1 flex-col overflow-hidden">
{@render children?.()}

@@ -1,7 +0,0 @@
import { describe, it, expect } from 'vitest';

describe('sum test', () => {
it('adds 1 + 2 to equal 3', () => {
expect(1 + 2).toBe(3);
});
});

@@ -0,0 +1,423 @@
import { describe, it, expect } from 'vitest';
import { AttachmentType } from '$lib/enums';
import {
formatMessageForClipboard,
parseClipboardContent,
hasClipboardAttachments
} from '$lib/utils/clipboard';

describe('formatMessageForClipboard', () => {
it('returns plain content when no extras', () => {
const result = formatMessageForClipboard('Hello world', undefined);
expect(result).toBe('Hello world');
});

it('returns plain content when extras is empty array', () => {
const result = formatMessageForClipboard('Hello world', []);
expect(result).toBe('Hello world');
});

it('handles empty string content', () => {
const result = formatMessageForClipboard('', undefined);
expect(result).toBe('');
});

it('returns plain content when extras has only non-text attachments', () => {
const extras = [
{
type: AttachmentType.IMAGE as const,
name: 'image.png',
base64Url: 'data:image/png;base64,...'
}
];
const result = formatMessageForClipboard('Hello world', extras);
expect(result).toBe('Hello world');
});

it('filters non-text attachments and keeps only text ones', () => {
const extras = [
{
type: AttachmentType.IMAGE as const,
name: 'image.png',
base64Url: 'data:image/png;base64,...'
},
{
type: AttachmentType.TEXT as const,
name: 'file.txt',
content: 'Text content'
},
{
type: AttachmentType.PDF as const,
name: 'doc.pdf',
base64Data: 'data:application/pdf;base64,...',
content: 'PDF content',
processedAsImages: false
}
];
const result = formatMessageForClipboard('Hello', extras);

expect(result).toContain('"file.txt"');
expect(result).not.toContain('image.png');
expect(result).not.toContain('doc.pdf');
});

it('formats message with text attachments', () => {
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'file1.txt',
content: 'File 1 content'
},
{
type: AttachmentType.TEXT as const,
name: 'file2.txt',
content: 'File 2 content'
}
];
const result = formatMessageForClipboard('Hello world', extras);

expect(result).toContain('"Hello world"');
expect(result).toContain('"type": "TEXT"');
expect(result).toContain('"name": "file1.txt"');
expect(result).toContain('"content": "File 1 content"');
expect(result).toContain('"name": "file2.txt"');
});

it('handles content with quotes and special characters', () => {
const content = 'Hello "world" with\nnewline';
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'test.txt',
content: 'Test content'
}
];
const result = formatMessageForClipboard(content, extras);

// Should be valid JSON
expect(result.startsWith('"')).toBe(true);
// The content should be properly escaped
const parsed = JSON.parse(result.split('\n')[0]);
expect(parsed).toBe(content);
});

it('converts legacy context type to TEXT type', () => {
const extras = [
{
type: AttachmentType.LEGACY_CONTEXT as const,
name: 'legacy.txt',
content: 'Legacy content'
}
];
const result = formatMessageForClipboard('Hello', extras);

expect(result).toContain('"type": "TEXT"');
expect(result).not.toContain('"context"');
});

it('handles attachment content with special characters', () => {
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'code.js',
content: 'const x = "hello\\nworld";\nconst y = `template ${var}`;'
}
];
const formatted = formatMessageForClipboard('Check this code', extras);
const parsed = parseClipboardContent(formatted);

expect(parsed.textAttachments[0].content).toBe(
'const x = "hello\\nworld";\nconst y = `template ${var}`;'
);
});

it('handles unicode characters in content and attachments', () => {
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'unicode.txt',
content: '日本語テスト 🎉 émojis'
}
];
const formatted = formatMessageForClipboard('Привет мир 👋', extras);
const parsed = parseClipboardContent(formatted);

expect(parsed.message).toBe('Привет мир 👋');
expect(parsed.textAttachments[0].content).toBe('日本語テスト 🎉 émojis');
});

it('formats as plain text when asPlainText is true', () => {
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'file1.txt',
content: 'File 1 content'
},
{
type: AttachmentType.TEXT as const,
name: 'file2.txt',
content: 'File 2 content'
}
];
const result = formatMessageForClipboard('Hello world', extras, true);

expect(result).toBe('Hello world\n\nFile 1 content\n\nFile 2 content');
});

it('returns plain content when asPlainText is true but no attachments', () => {
const result = formatMessageForClipboard('Hello world', [], true);
expect(result).toBe('Hello world');
});

it('plain text mode does not use JSON format', () => {
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'test.txt',
content: 'Test content'
}
];
const result = formatMessageForClipboard('Hello', extras, true);

expect(result).not.toContain('"type"');
expect(result).not.toContain('[');
expect(result).toBe('Hello\n\nTest content');
});
});

describe('parseClipboardContent', () => {
it('returns plain text as message when not in special format', () => {
const result = parseClipboardContent('Hello world');

expect(result.message).toBe('Hello world');
expect(result.textAttachments).toHaveLength(0);
});

it('handles empty string input', () => {
const result = parseClipboardContent('');

expect(result.message).toBe('');
expect(result.textAttachments).toHaveLength(0);
});

it('handles whitespace-only input', () => {
const result = parseClipboardContent(' \n\t ');

expect(result.message).toBe(' \n\t ');
expect(result.textAttachments).toHaveLength(0);
});

it('returns plain text as message when starts with quote but invalid format', () => {
const result = parseClipboardContent('"Unclosed quote');

expect(result.message).toBe('"Unclosed quote');
expect(result.textAttachments).toHaveLength(0);
});

it('returns original text when JSON array is malformed', () => {
const input = '"Hello"\n[invalid json';

const result = parseClipboardContent(input);

expect(result.message).toBe('"Hello"\n[invalid json');
expect(result.textAttachments).toHaveLength(0);
});

it('parses message with text attachments', () => {
const input = `"Hello world"
[
{"type":"TEXT","name":"file1.txt","content":"File 1 content"},
{"type":"TEXT","name":"file2.txt","content":"File 2 content"}
]`;

const result = parseClipboardContent(input);

expect(result.message).toBe('Hello world');
expect(result.textAttachments).toHaveLength(2);
expect(result.textAttachments[0].name).toBe('file1.txt');
expect(result.textAttachments[0].content).toBe('File 1 content');
expect(result.textAttachments[1].name).toBe('file2.txt');
expect(result.textAttachments[1].content).toBe('File 2 content');
});

it('handles escaped quotes in message', () => {
const input = `"Hello \\"world\\" with quotes"
[
{"type":"TEXT","name":"file.txt","content":"test"}
]`;

const result = parseClipboardContent(input);

expect(result.message).toBe('Hello "world" with quotes');
expect(result.textAttachments).toHaveLength(1);
});

it('handles newlines in message', () => {
const input = `"Hello\\nworld"
[
{"type":"TEXT","name":"file.txt","content":"test"}
]`;

const result = parseClipboardContent(input);

expect(result.message).toBe('Hello\nworld');
expect(result.textAttachments).toHaveLength(1);
});

it('returns message only when no array follows', () => {
const input = '"Just a quoted string"';

const result = parseClipboardContent(input);

expect(result.message).toBe('Just a quoted string');
expect(result.textAttachments).toHaveLength(0);
});

it('filters out invalid attachment objects', () => {
const input = `"Hello"
[
{"type":"TEXT","name":"valid.txt","content":"valid"},
{"type":"INVALID","name":"invalid.txt","content":"invalid"},
{"name":"missing-type.txt","content":"missing"},
{"type":"TEXT","content":"missing name"}
]`;

const result = parseClipboardContent(input);

expect(result.message).toBe('Hello');
expect(result.textAttachments).toHaveLength(1);
expect(result.textAttachments[0].name).toBe('valid.txt');
});

it('handles empty attachments array', () => {
const input = '"Hello"\n[]';

const result = parseClipboardContent(input);

expect(result.message).toBe('Hello');
expect(result.textAttachments).toHaveLength(0);
});

it('roundtrips correctly with formatMessageForClipboard', () => {
const originalContent = 'Hello "world" with\nspecial characters';
const originalExtras = [
{
type: AttachmentType.TEXT as const,
name: 'file1.txt',
content: 'Content with\nnewlines and "quotes"'
},
{
type: AttachmentType.TEXT as const,
name: 'file2.txt',
content: 'Another file'
}
];

const formatted = formatMessageForClipboard(originalContent, originalExtras);
const parsed = parseClipboardContent(formatted);

expect(parsed.message).toBe(originalContent);
expect(parsed.textAttachments).toHaveLength(2);
expect(parsed.textAttachments[0].name).toBe('file1.txt');
expect(parsed.textAttachments[0].content).toBe('Content with\nnewlines and "quotes"');
expect(parsed.textAttachments[1].name).toBe('file2.txt');
expect(parsed.textAttachments[1].content).toBe('Another file');
});
});

describe('hasClipboardAttachments', () => {
it('returns false for plain text', () => {
expect(hasClipboardAttachments('Hello world')).toBe(false);
});

it('returns false for empty string', () => {
expect(hasClipboardAttachments('')).toBe(false);
});

it('returns false for quoted string without attachments', () => {
expect(hasClipboardAttachments('"Hello world"')).toBe(false);
});

it('returns true for valid format with attachments', () => {
const input = `"Hello"
[{"type":"TEXT","name":"file.txt","content":"test"}]`;

expect(hasClipboardAttachments(input)).toBe(true);
});

it('returns false for format with empty attachments array', () => {
const input = '"Hello"\n[]';

expect(hasClipboardAttachments(input)).toBe(false);
});

it('returns false for malformed JSON', () => {
expect(hasClipboardAttachments('"Hello"\n[broken')).toBe(false);
});
});

describe('roundtrip edge cases', () => {
it('preserves empty message with attachments', () => {
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'file.txt',
content: 'Content only'
}
];
const formatted = formatMessageForClipboard('', extras);
const parsed = parseClipboardContent(formatted);

expect(parsed.message).toBe('');
expect(parsed.textAttachments).toHaveLength(1);
expect(parsed.textAttachments[0].content).toBe('Content only');
});

it('preserves attachment with empty content', () => {
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'empty.txt',
content: ''
}
];
const formatted = formatMessageForClipboard('Message', extras);
const parsed = parseClipboardContent(formatted);

expect(parsed.message).toBe('Message');
expect(parsed.textAttachments).toHaveLength(1);
expect(parsed.textAttachments[0].content).toBe('');
});

it('preserves multiple backslashes', () => {
const content = 'Path: C:\\\\Users\\\\test\\\\file.txt';
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'path.txt',
content: 'D:\\\\Data\\\\file'
}
];
const formatted = formatMessageForClipboard(content, extras);
const parsed = parseClipboardContent(formatted);

expect(parsed.message).toBe(content);
expect(parsed.textAttachments[0].content).toBe('D:\\\\Data\\\\file');
});

it('preserves tabs and various whitespace', () => {
const content = 'Line1\t\tTabbed\n Spaced\r\nCRLF';
const extras = [
{
type: AttachmentType.TEXT as const,
name: 'whitespace.txt',
content: '\t\t\n\n '
}
];
const formatted = formatMessageForClipboard(content, extras);
const parsed = parseClipboardContent(formatted);

expect(parsed.message).toBe(content);
expect(parsed.textAttachments[0].content).toBe('\t\t\n\n ');
});
});

@@ -1,6 +1,6 @@
/* eslint-disable no-irregular-whitespace */
import { describe, it, expect, test } from 'vitest';
import { maskInlineLaTeX, preprocessLaTeX } from './latex-protection';
import { maskInlineLaTeX, preprocessLaTeX } from '$lib/utils/latex-protection';

describe('maskInlineLaTeX', () => {
it('should protect LaTeX $x + y$ but not money $3.99', () => {

@@ -1,5 +1,5 @@
import { describe, expect, it } from 'vitest';
import { isValidModelName, normalizeModelName } from './model-names';
import { isValidModelName, normalizeModelName } from '$lib/utils/model-names';

describe('normalizeModelName', () => {
it('preserves Hugging Face org/model format (single slash)', () => {

@@ -125,9 +125,9 @@ export default defineConfig({
{
extends: './vite.config.ts',
test: {
name: 'server',
name: 'unit',
environment: 'node',
include: ['tests/server/**/*.{test,spec}.{js,ts}']
include: ['tests/unit/**/*.{test,spec}.{js,ts}']
}
},
{