From 66ba51252ed1cf08739fbed8a538d3cb3541655d Mon Sep 17 00:00:00 2001 From: Gustavo Rocha Dias <91472747+gustrd@users.noreply.github.com> Date: Sat, 13 Dec 2025 08:46:36 -0300 Subject: [PATCH 01/23] cmake: correct scope - link ws2_32 for MinGW/w64devkit builds in cpp-httplib (#17972) * fix - w64devkit build * fix - w64devkit build private scope --- vendor/cpp-httplib/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vendor/cpp-httplib/CMakeLists.txt b/vendor/cpp-httplib/CMakeLists.txt index e90e8e2d1b..8f0d15d1fd 100644 --- a/vendor/cpp-httplib/CMakeLists.txt +++ b/vendor/cpp-httplib/CMakeLists.txt @@ -11,8 +11,9 @@ endif() target_link_libraries (${TARGET} PRIVATE Threads::Threads) if (WIN32 AND NOT MSVC) - target_link_libraries(${TARGET} PUBLIC ws2_32) + target_link_libraries(${TARGET} PRIVATE ws2_32) endif() + target_compile_features(${TARGET} PRIVATE cxx_std_17) target_compile_definitions(${TARGET} PRIVATE From 4d5ae24c0ac79c4e360773bac58dd2c2a46b7f67 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Sat, 13 Dec 2025 12:53:37 +0100 Subject: [PATCH 02/23] arg: fix common_params_parse not accepting negated arg (#17991) --- common/arg.cpp | 5 ++++- common/arg.h | 2 +- tests/test-arg-parser.cpp | 4 ++++ tools/server/server-models.cpp | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 19f22f883f..bb2a6840ba 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -724,7 +724,7 @@ static void add_rpc_devices(const std::string & servers) { } } -bool common_params_parse(int argc, char ** argv, llama_example ex, std::map & out_map) { +bool common_params_to_map(int argc, char ** argv, llama_example ex, std::map & out_map) { common_params dummy_params; common_params_context ctx_arg = common_params_parser_init(dummy_params, ex, nullptr); @@ -733,6 +733,9 @@ bool common_params_parse(int argc, char ** argv, llama_example ex, std::map & out_map); +bool common_params_to_map(int argc, char ** argv, llama_example ex, std::map & out_map); // initialize argument parser context - used by test-arg-parser and preset common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr); diff --git a/tests/test-arg-parser.cpp b/tests/test-arg-parser.cpp index 90750b20c2..468d325e22 100644 --- a/tests/test-arg-parser.cpp +++ b/tests/test-arg-parser.cpp @@ -72,6 +72,10 @@ int main(void) { argv = {"binary_name", "--draft", "123"}; assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_EMBEDDING)); + // negated arg + argv = {"binary_name", "--no-mmap"}; + assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON)); + printf("test-arg-parser: test valid usage\n\n"); diff --git a/tools/server/server-models.cpp b/tools/server/server-models.cpp index 6c618a673c..6be5ffbdf0 100644 --- a/tools/server/server-models.cpp +++ b/tools/server/server-models.cpp @@ -171,7 +171,7 @@ server_presets::server_presets(int argc, char ** argv, common_params & base_para } // read base args from router's argv - common_params_parse(argc, argv, LLAMA_EXAMPLE_SERVER, base_args); + common_params_to_map(argc, argv, LLAMA_EXAMPLE_SERVER, base_args); // remove any router-controlled args from base_args for (const auto & cargs : control_args) { From 5266379bcae74214af397f36aa81b2a08b15d545 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 13 Dec 2025 09:19:51 -0600 Subject: [PATCH 03/23] 
llama_context: synchronize before reallocating output buffer (#17974) --- src/llama-context.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 2692297dca..9914b3276b 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1318,6 +1318,7 @@ uint32_t llama_context::output_reserve(int32_t n_outputs) { // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark) LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); #endif + synchronize(); buf_output = nullptr; logits = nullptr; embd = nullptr; From 4ed2bae50d64dcff7f99cb2b28f737fda314abf5 Mon Sep 17 00:00:00 2001 From: Sergey Fedorov Date: Sun, 14 Dec 2025 05:02:43 +0800 Subject: [PATCH 04/23] server-models.cpp: add missing (#18000) Fixes: https://github.com/ggml-org/llama.cpp/issues/17999 --- tools/server/server-models.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/server/server-models.cpp b/tools/server/server-models.cpp index 6be5ffbdf0..3690c0bb82 100644 --- a/tools/server/server-models.cpp +++ b/tools/server/server-models.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef _WIN32 #include From c00ff929dcfd150234e62f30e863bca4f1337aee Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Sat, 13 Dec 2025 22:33:29 +0100 Subject: [PATCH 05/23] scripts: add script to compare logprobs of llama.cpp against other frameworks (#17947) * scripts: add script to compare logits of llama.cpp against other frameworks * accept custom prompt file * fix code style * clarify endpoint * fix displaying * use abs for diff * fix vllm case * rm output file * rename to compare-logprobs * add "pattern" --- scripts/compare-logprobs.py | 281 ++++++++++++++++++++++++++++++++++++ 1 file changed, 281 insertions(+) create mode 100644 scripts/compare-logprobs.py diff --git a/scripts/compare-logprobs.py b/scripts/compare-logprobs.py new file mode 100644 index 0000000000..63861dd9a4 --- /dev/null +++ b/scripts/compare-logprobs.py @@ -0,0 +1,281 @@ +import argparse +import requests +import json +from pathlib import Path +import logging + +logger = logging.getLogger("compare-logprobs") +logging.basicConfig(level=logging.INFO) + + +DESCRIPTION = """ +Compare logits between llama.cpp and another inference engine using OpenAI-compatible server endpoints. + +Unlike compare-logits.py, it allows dumping logits from a hosted API endpoint. Useful when it's not possible to run both models locally. + +Example usage: + Step 1: Dump logits from two different servers + python scripts/compare-logprobs.py dump logits_llama.log http://localhost:8080/v1/completions + python scripts/compare-logprobs.py dump logits_other.log http://other-engine:8000/v1/completions + + (optionally, you can add --api-key if the endpoint requires authentication) + + Step 2: Compare the dumped logits + python scripts/compare-logprobs.py compare logits_llama.log logits_other.log report.md +""" + + +def generate_input_prompt(length: int) -> list[str]: + CORPUS = """ + You are an advanced AI assistant capable of using tools to gather information, perform calculations, or execute tasks. Always think step by step before responding. If a user's query requires external data, computation, or actions beyond your internal knowledge, use the appropriate tools via function calls. + + ### Tool Call Format: + When you need to use a tool, output the call in this exact XML format. 
Include the opening and closing tags. Do not escape arguments; they will be parsed as plain text. + + You can make multiple calls in one go by placing them one after another. + """ + words = [w.strip() for w in CORPUS.strip().split(" ")] + words = [w for w in words if len(w) > 0] # filter out empty strings + while len(words) < length: + words += words + return words[:length] + + +def dump_logits( + endpoint: str, + output_path: Path, + input_words: list[str], + pattern: list[tuple[bool, int]], + api_key=None, +): + logger.info(f"Dumping logits to {output_path} from endpoint {endpoint}...") + words = input_words + curr_text = "" + n_total = sum(n for get, n in pattern if get) + n_done = 0 + i_cur = 0 + i_total = len(words) + with output_path.open("w") as f: + for get, n in pattern: + if not get: + # skip n words + for i in range(n): + curr_text += words.pop(0) + " " + i_cur += 1 + continue + # get n words + for i in range(n): + curr_text += words.pop(0) + " " + payload = { + "prompt": curr_text.strip(), + "temperature": 0.0, + "top_k": 1, + "max_tokens": 1, + "logprobs": 1, + "stream": False, + } + response = requests.post( + endpoint, + json=payload, + headers={"Authorization": f"Bearer {api_key}"} if api_key else {}, + ) + response.raise_for_status() + data = response.json() + data["__index"] = i_cur # add index for easier debugging later + data = json.dumps(data) + f.write(f"{data}\n") + n_done += 1 + i_cur += 1 + logger.info( + f"\n\n{data}\n\n[Step: {n_done}/{n_total} | Word: {i_cur}/{i_total}]" + ) + logger.info(f"Logits dumped to {output_path}") + + +def get_token_logprobs(data: dict): + logprobs = data["choices"][0]["logprobs"] + if "content" in logprobs: + # llama.cpp case + top = logprobs["content"][0]["top_logprobs"][0] + return top["token"], top["logprob"] + else: + # vllm case + tokens = logprobs["tokens"] + token_logprobs = logprobs["token_logprobs"] + return tokens[0], token_logprobs[0] + + +def clean_text(text: str) -> str: + return ( + "'" + + text.replace("\n", "\\n") + .replace("\t", "\\t") + .replace("\r", "\\r") + .replace("|", "\\|") + + "'" + ) + + +def compare_logits(input1: Path, input2: Path, output_path: Path): + with input1.open("r") as f1, input2.open("r") as f2, output_path.open("w") as fout: + lines1 = f1.readlines() + lines2 = f2.readlines() + + tab_header = [ + "idx", + input1.name, + "logprob_1", + input2.name, + "logprob_2", + "diff (abs)", + ] + tab_entries = [] + tab_max_widths = [len(h) for h in tab_header] + + assert len(lines1) == len( + lines2 + ), "Input files must have the same number of lines." 
+ + fout.write("# Logits Comparison Report\n\n") + for i, (line1, line2) in enumerate(zip(lines1, lines2)): + if not line1.strip() or not line2.strip(): + continue # skip empty lines + + data1 = json.loads(line1) + data2 = json.loads(line2) + + idx1 = data1.get("__index", -1) + idx2 = data2.get("__index", -1) + if idx1 != idx2: + logger.warning( + f"Warning: Mismatched indices at line {i}: {idx1} vs {idx2}" + ) + + token1, logprob1 = get_token_logprobs(data1) + token2, logprob2 = get_token_logprobs(data2) + + token1 = clean_text(token1) + token2 = clean_text(token2) + abs_diff = abs(logprob1 - logprob2) + + tab_entries.append( + ( + str(idx1 + 1), + token1, + f"{logprob1:.4f}", + token2, + f"{logprob2:.4f}", + f"{(abs_diff):.4f}", + ) + ) + + for i in range(len(tab_entries)): + for j in range(len(tab_header)): + tab_max_widths[j] = max(tab_max_widths[j], len(tab_entries[i][j])) + + output = "" + for j in range(len(tab_header)): + output += f"| {tab_header[j]:<{tab_max_widths[j]}} " + output += "|\n" + for j in range(len(tab_header)): + output += f"|{'-' * (tab_max_widths[j] + 2)}" + output += "|\n" + for entry in tab_entries: + for j in range(len(tab_header)): + output += f"| {entry[j]:<{tab_max_widths[j]}} " + output += "|\n" + + logger.info("\n" + output) + fout.write(output) + logger.info(f"Report written to {output_path}") + + +def parse_pattern(pattern: str) -> list[tuple[bool, int]]: + parts = pattern.split(",") + result = [] + for i, part in enumerate(parts): + n = int(part) + if i % 2 == 0: + result.append((True, n)) # get n words + else: + result.append((False, n)) # skip n words + return result + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter + ) + subparsers = parser.add_subparsers( + dest="verb", required=True, help="action to perform" + ) + + # dump subcommand + parser_dump = subparsers.add_parser("dump", help="dump logits from an endpoint") + parser_dump.add_argument( + "output", type=Path, help="output path for dumped logits (.log)" + ) + parser_dump.add_argument( + "endpoint", type=str, help="OAI-compat /completions endpoint" + ) + parser_dump.add_argument( + "--api-key", + type=str, + default=None, + help="API key for authentication (if required)", + ) + parser_dump.add_argument( + "--file", + type=Path, + default=None, + help="File containing prompt to use instead of the default", + ) + parser_dump.add_argument( + "--pattern", + type=str, + default="10,1000,10,4000,10", + help="Pattern n_get,n_skip,... 
where n_get is number of words to get and n_skip is number of words to skip (num of words, NOT num of tokens)",
    )

    # compare subcommand
    parser_compare = subparsers.add_parser(
        "compare", help="compare two dumped logits files"
    )
    parser_compare.add_argument("input1", type=Path, help="first input file (.log)")
    parser_compare.add_argument("input2", type=Path, help="second input file (.log)")
    parser_compare.add_argument(
        "output", type=Path, help="output path for comparison report (.md)"
    )

    try:
        return parser.parse_args()
    except Exception as e:
        parser.print_help()
        raise e


def main():
    args = parse_args()

    if args.verb == "dump":
        pattern = parse_pattern(args.pattern)
        input_length = sum(n for _, n in pattern)
        input_words = generate_input_prompt(input_length)
        if args.file is not None:
            with args.file.open("r") as f:
                input_words = f.read().strip().split(" ")
            # the pattern determines how many words are consumed, so the file
            # must provide at least that many
            if len(input_words) < input_length:
                raise ValueError(
                    f"Input file has only {len(input_words)} words, but pattern requires at least {input_length} words."
                )
            input_length = len(input_words)
        logger.info(f"Using {input_length} words")
        dump_logits(args.endpoint, args.output, input_words, pattern, args.api_key)
    elif args.verb == "compare":
        compare_logits(args.input1, args.input2, args.output)
    else:
        raise ValueError(f"Unknown verb: {args.verb}")


if __name__ == "__main__":
    main()

From 615655aafe46339f04e2879d14d7d0d68fc2cfcd Mon Sep 17 00:00:00 2001
From: Congcong Cai
Date: Fri, 12 Dec 2025 22:37:38 +0800
Subject: [PATCH 06/23] cmake : set `CMAKE_RUNTIME_OUTPUT_DIRECTORY` for non
 standalone build (ggml/1394)

Some backends depend on CMAKE_RUNTIME_OUTPUT_DIRECTORY to create
temporary files, e.g. the metal backend.
If CMAKE_RUNTIME_OUTPUT_DIRECTORY is missing, cmake can fail with
errors such as permission denied (trying to copy a file to the root
directory).
This PR sets up a default path for CMAKE_RUNTIME_OUTPUT_DIRECTORY when
it is not already set.
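A minimal sketch of the situation being fixed (a hypothetical consumer
project, not part of this patch): a parent build embeds ggml via
add_subdirectory() without setting CMAKE_RUNTIME_OUTPUT_DIRECTORY,
which the metal backend's file-copy step relies on; with this change
ggml falls back to ${CMAKE_BINARY_DIR}/bin. The project name, paths,
and sources below are illustrative only:

    # hypothetical top-level CMakeLists.txt embedding ggml as a subproject
    cmake_minimum_required(VERSION 3.14)
    project(my_app CXX)

    # CMAKE_RUNTIME_OUTPUT_DIRECTORY is deliberately left unset here;
    # before this patch, ggml's metal backend could try to copy its
    # runtime files to an unwritable default location
    add_subdirectory(ggml)

    add_executable(my_app main.cpp)
    target_link_libraries(my_app PRIVATE ggml)

The `if (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY)` guard in the diff below
keeps an explicit parent-project setting authoritative and only
supplies the default when nothing was configured.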
--- ggml/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index ab5b4760e2..a65dcfbe1e 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -54,6 +54,10 @@ if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) # TODO else() set(GGML_STANDALONE OFF) + + if (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY) + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + endif() endif() if (EMSCRIPTEN) From 71fdcf0616fa0f93dab37e4c0d0d77e708e398a9 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 13 Dec 2025 08:04:09 +0200 Subject: [PATCH 07/23] ggml : arm repack fix build (whisper/0) --- ggml/src/ggml-cpu/arch/arm/repack.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ggml/src/ggml-cpu/arch/arm/repack.cpp b/ggml/src/ggml-cpu/arch/arm/repack.cpp index 683ed8d2df..fc62f1b86e 100644 --- a/ggml/src/ggml-cpu/arch/arm/repack.cpp +++ b/ggml/src/ggml-cpu/arch/arm/repack.cpp @@ -24,6 +24,7 @@ #define UNUSED GGML_UNUSED +#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) static inline void decode_q4_Kx8_scales_mins(const uint8_t * scales_in, int16x8_t * out_mins, int8_t * out_scales) { @@ -46,6 +47,7 @@ static inline void decode_q4_Kx8_scales_mins(const uint8_t * scales_in, scales_u32[1] = (sm[2] & kmask2) | (((sm[0] >> 6) & kmask3) << 4); memcpy(out_scales, scales_u32, 8); } +#endif void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); From 0e59224990e6b535e87fbf10851dcd35aed04f3a Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 13 Dec 2025 10:07:07 +0200 Subject: [PATCH 08/23] sync : ggml --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index a879940eae..5823efac2d 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -55bc9320a4aae82af18e23eefd5de319a755d7b9 +130bc125a88bb57664b88932c48c38a1cb316fac From a63cbafbbc5a7afe80b75486cffa651003aaede4 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 13 Dec 2025 22:54:14 +0200 Subject: [PATCH 09/23] ggml : arm repack fix build --- ggml/src/ggml-cpu/arch/arm/repack.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cpu/arch/arm/repack.cpp b/ggml/src/ggml-cpu/arch/arm/repack.cpp index fc62f1b86e..fb7f074a85 100644 --- a/ggml/src/ggml-cpu/arch/arm/repack.cpp +++ b/ggml/src/ggml-cpu/arch/arm/repack.cpp @@ -24,7 +24,7 @@ #define UNUSED GGML_UNUSED -#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) +#if defined(__aarch64__) && defined(__ARM_NEON) && (defined(__ARM_FEATURE_MATMUL_INT8) || defined(__ARM_FEATURE_DOTPROD)) static inline void decode_q4_Kx8_scales_mins(const uint8_t * scales_in, int16x8_t * out_mins, int8_t * out_scales) { From 609a2d026897829730ca231285469a9c80967fb7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 14 Dec 2025 08:34:56 +0200 Subject: [PATCH 10/23] models : fix YaRN regression + consolidate logic (#18006) * models : fix YaRN regression + consolidate logic * cont : fix the fix * cont : remove header * cont : add header --- src/llama-context.cpp | 38 ++++++++++++++++++++++++++++++++++++++ src/llama-graph.cpp | 2 +- src/llama-hparams.cpp | 11 ----------- src/llama-hparams.h | 7 ------- src/llama-kv-cache.cpp | 2 +- src/llama-model.cpp | 26 -------------------------- 6 files changed, 40 insertions(+), 46 deletions(-) diff --git a/src/llama-context.cpp 
b/src/llama-context.cpp index 9914b3276b..2a17e44ecd 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -9,6 +9,7 @@ #include "llama-model.h" #include +#include #include #include #include @@ -72,6 +73,43 @@ llama_context::llama_context( cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f; } + if (cparams.yarn_ext_factor != 0) { + static auto get_mscale = [](float scale, float mscale) { + return scale <= 1.0f ? 1.0f : (0.1f * mscale * logf(scale) + 1.0f); + }; + + const float factor = 1.0f / cparams.rope_freq_scale; + + // ref: https://github.com/huggingface/transformers/blob/6d00f6b0a5679c36510f203e4226e36f517c3032/src/transformers/modeling_rope_utils.py#L336-L348 + if (hparams.rope_yarn_log_mul != 0.0f) { + // note: here we assume `mscale == 1.0f` + // TODO: start reading the actual value of mscale and handle the case where it is not 1.0f + float mscale = 1.0f; + const float mscale_all_dims = hparams.rope_yarn_log_mul; + + // [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX] + // special-case DEEPSEEK v2: + // https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat/blob/main/config.json#L42-L43 + if (model.arch == LLM_ARCH_DEEPSEEK2 && mscale_all_dims != 1.0f) { + mscale = mscale_all_dims; + } + + cparams.yarn_attn_factor = get_mscale(factor, mscale) / get_mscale(factor, mscale_all_dims); + + LLAMA_LOG_WARN("%s: setting new yarn_attn_factor = %.4f (mscale == %.1f, mscale_all_dim = %.1f)\n", + __func__, cparams.yarn_attn_factor, mscale, mscale_all_dims); + } else { + cparams.yarn_attn_factor = get_mscale(factor, 1.0f); + } + + // when YARN is applied with yarn_ext_factor != 0.0f, we need to cancel this factor: + // https://github.com/ggml-org/llama.cpp/blob/a81a569577cc38b32558958b048228150be63eae/ggml/src/ggml-cpu/ops.cpp#L5541-L5544 + // + // ref: https://github.com/ggml-org/llama.cpp/discussions/7416 + // https://github.com/ggml-org/llama.cpp/pull/17945 + cparams.yarn_attn_factor *= 1.0f / (1.0f + 0.1f * logf(factor)); + } + cparams.yarn_attn_factor *= hparams.rope_attn_factor; if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) { diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index a1a32494b7..6cf9a883a6 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -574,7 +574,7 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) : freq_base (cparams.rope_freq_base), freq_scale (cparams.rope_freq_scale), ext_factor (cparams.yarn_ext_factor), - attn_factor (llama_hparams::yarn_attn_factor_adjust(cparams.yarn_attn_factor, cparams.rope_freq_scale, cparams.yarn_ext_factor)), + attn_factor (cparams.yarn_attn_factor), beta_fast (cparams.yarn_beta_fast), beta_slow (cparams.yarn_beta_slow), norm_eps (hparams.f_norm_eps), diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp index 277d0bcfd3..96c9598c24 100644 --- a/src/llama-hparams.cpp +++ b/src/llama-hparams.cpp @@ -3,7 +3,6 @@ #include "ggml.h" #include -#include void llama_hparams::set_swa_pattern(uint32_t n_pattern, bool dense_first) { if (dense_first) { @@ -231,13 +230,3 @@ bool llama_hparams::is_masked_swa(uint32_t n_swa, llama_swa_type swa_type, llama return false; } - -float llama_hparams::yarn_attn_factor_adjust(float attn_factor, float freq_scale, float ext_factor) { - GGML_ASSERT(ext_factor >= 0.0f); - - if (ext_factor != 0.0f) { - attn_factor *= 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)); - } - - return attn_factor; -} diff --git a/src/llama-hparams.h b/src/llama-hparams.h index c9960e9169..aab319754e 100644 --- a/src/llama-hparams.h +++ 
b/src/llama-hparams.h
@@ -268,13 +268,6 @@ struct llama_hparams {
     // TODO: think of a better place for this function
     // TODO: pack the SWA params in a struct?
     static bool is_masked_swa(uint32_t n_swa, llama_swa_type swa_type, llama_pos p0, llama_pos p1);
-
-    // when YARN is applied with yarn_ext_factor != 0.0f, we need to cancel this factor:
-    // https://github.com/ggml-org/llama.cpp/blob/a81a569577cc38b32558958b048228150be63eae/ggml/src/ggml-cpu/ops.cpp#L5541-L5544
-    //
-    // ref: https://github.com/ggml-org/llama.cpp/discussions/7416
-    //      https://github.com/ggml-org/llama.cpp/pull/17945
-    static float yarn_attn_factor_adjust(float attn_factor, float freq_scale, float ext_factor);
 };

 static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp
index 8f94c8820c..bf3de2f2ef 100644
--- a/src/llama-kv-cache.cpp
+++ b/src/llama-kv-cache.cpp
@@ -1372,7 +1372,7 @@ ggml_tensor * llama_kv_cache::build_rope_shift(
     const auto & yarn_ext_factor  = cparams.yarn_ext_factor;
     const auto & yarn_beta_fast   = cparams.yarn_beta_fast;
     const auto & yarn_beta_slow   = cparams.yarn_beta_slow;
-    const auto & yarn_attn_factor = llama_hparams::yarn_attn_factor_adjust(cparams.yarn_attn_factor, cparams.rope_freq_scale, cparams.yarn_ext_factor);
+    const auto & yarn_attn_factor = cparams.yarn_attn_factor;

     const auto & n_rot = hparams.n_rot;

     const auto & rope_type = hparams.rope_type == LLAMA_ROPE_TYPE_MROPE || hparams.rope_type == LLAMA_ROPE_TYPE_IMROPE
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index e4808b1e1e..5da1dd6dbb 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -2294,32 +2294,6 @@ void llama_model::load_hparams(llama_model_loader & ml) {
         default: throw std::runtime_error("unsupported model architecture");
     }

-    // ref: https://github.com/huggingface/transformers/blob/6d00f6b0a5679c36510f203e4226e36f517c3032/src/transformers/modeling_rope_utils.py#L336-L348
-    if (hparams.rope_yarn_log_mul != 0.0f) {
-        const float factor = 1.0f / hparams.rope_freq_scale_train;
-
-        // note: here we assume `mscale == 1.0f`
-        // TODO: start reading the actual value of mscale and handle the case where it is not 1.0f
-        float mscale = 1.0f;
-        const float mscale_all_dims = hparams.rope_yarn_log_mul;
-
-        // [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]
-        // special-case DEEPSEEK v2:
-        // https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat/blob/main/config.json#L42-L43
-        if (arch == LLM_ARCH_DEEPSEEK2 && mscale_all_dims != 1.0f) {
-            mscale = mscale_all_dims;
-        }
-
-        static auto get_mscale = [](float scale, float mscale) {
-            return scale <= 1.0f ?
1.0f : (0.1f * mscale * logf(scale) + 1.0f); - }; - - hparams.yarn_attn_factor = get_mscale(factor, mscale) / get_mscale(factor, mscale_all_dims); - - LLAMA_LOG_WARN("%s: setting new yarn_attn_factor = %.4f (mscale == %.1f, mscale_all_dim = %.1f)\n", - __func__, hparams.yarn_attn_factor, mscale, mscale_all_dims); - } - pimpl->n_bytes = ml.n_bytes; pimpl->desc_str = arch_name() + " " + type_name() + " " + ml.ftype_name(); From 77ad8542bd09c374d65be8abffd88ae4aa1e704d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 14 Dec 2025 08:58:13 +0200 Subject: [PATCH 11/23] model-conversion : cast logits to float32 (#18009) --- examples/model-conversion/scripts/causal/run-org-model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/model-conversion/scripts/causal/run-org-model.py b/examples/model-conversion/scripts/causal/run-org-model.py index 7d2b80057c..da1132c003 100755 --- a/examples/model-conversion/scripts/causal/run-org-model.py +++ b/examples/model-conversion/scripts/causal/run-org-model.py @@ -200,7 +200,7 @@ with torch.no_grad(): logits = outputs.logits # Extract logits for the last token (next token prediction) - last_logits = logits[0, -1, :].cpu().numpy() + last_logits = logits[0, -1, :].float().cpu().numpy() print(f"Logits shape: {logits.shape}") print(f"Last token logits shape: {last_logits.shape}") From d15d177f43b7dd7f73a965716bd76a3327abb2d6 Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Sun, 14 Dec 2025 07:29:37 +0000 Subject: [PATCH 12/23] vulkan: faster q6_k matmul (#17813) * q6_k faster mul mat * 8 values * fix comment * switch to two at a time * start ci for .glsl files --- .github/workflows/build.yml | 6 ++++-- .gitignore | 1 + .../ggml-vulkan/vulkan-shaders/mul_mm_funcs.glsl | 13 ++++++++----- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9fe1401df4..af4c60be64 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,7 +20,8 @@ on: '**/*.swift', '**/*.m', '**/*.metal', - '**/*.comp' + '**/*.comp', + '**/*.glsl' ] pull_request: @@ -40,7 +41,8 @@ on: '**/*.swift', '**/*.m', '**/*.metal', - '**/*.comp' + '**/*.comp', + '**/*.glsl' ] concurrency: diff --git a/.gitignore b/.gitignore index 428f084110..05eb578a82 100644 --- a/.gitignore +++ b/.gitignore @@ -54,6 +54,7 @@ /out/ /tmp/ /autogen-*.md +/common/build-info.cpp # Deprecated diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_funcs.glsl b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_funcs.glsl index ee5ded2e8d..58ede04400 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_funcs.glsl +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_funcs.glsl @@ -244,17 +244,20 @@ void load_a_to_shmem(const uint pos_a, const uint row, const uint col, const uin const uint iqs = idx % 128; // 0..127 const uint n = iqs / 64; // 0,1 - const uint b = (iqs % 64) / 32; // 0,1 + const uint b = ((iqs % 64) / 32) * 4; // 0,4 const uint is_b = (iqs % 16) / 8; // 0,1 const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6 const uint is = 8 * n + qhshift + is_b; // 0..15 - const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126 - const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62 + const uint qsi = n * 32 + (iqs % 32); // 0..63 + const uint qhi = n * 16 + (iqs % 16); // 0..31 const float dscale = float(data_a[ib].d) * float(data_a[ib].scales[is]); - buf_a[buf_idx] = FLOAT_TYPE_VEC2(dscale * float(int8_t(((data_a[ib].ql[qsi ] >> (b * 4)) & 0xF) | 
(((data_a[ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32), - dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32)); + const uint ql = (uint(data_a_packed16[ib].ql[qsi]) >> b) & 0x0F0F; + const uint qh = (uint(data_a_packed16[ib].qh[qhi]) >> qhshift) & 0x0303; + const vec2 q = (vec2(unpack8(ql | (qh << 4)).xy) - 32) * dscale; + + buf_a[buf_idx] = FLOAT_TYPE_VEC2(q.x, q.y); #elif defined(DATA_A_IQ1_S) const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row; const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2; From 4722671641fdc29b371d517db05d4b395c6e068c Mon Sep 17 00:00:00 2001 From: lovedheart <6277001+lovedheart@users.noreply.github.com> Date: Sun, 14 Dec 2025 08:47:49 +0100 Subject: [PATCH 13/23] vulkan: improve mul_mat_vec_iq1_s speed (#17874) --- .../vulkan-shaders/mul_mat_vec_iq1_s.comp | 60 ++++++++++++------- 1 file changed, 38 insertions(+), 22 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp index 0b74b33212..e6b1f20215 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp @@ -7,34 +7,50 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; -void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) { - const uint y_idx = i * QUANT_K + 32 * ib32; +void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i, + const uint num_blocks_per_row, const uint first_row, const uint num_rows) { + const uint y_idx_base = i * QUANT_K + 32 * ib32; + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + const uint base_b_idx = (j * p.batch_stride_b + b_offset + y_idx_base) / 4; + [[unroll]] for (uint l = 0; l < 4; ++l) { + const vec4 b_val_0 = vec4(data_b_v4[base_b_idx + 2 * l]); + const vec4 b_val_1 = vec4(data_b_v4[base_b_idx + 2 * l + 1]); - uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - const float d = float(data_a[ibi].d); - const uint qh = data_a[ibi].qh[ib32]; - const float dl = d * float(2 * bitfieldExtract(qh, 12, 3) + 1); - const float delta = ((qh & 0x8000) != 0) ? 
-IQ1S_DELTA : IQ1S_DELTA; + // index for data_a + uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i; - [[unroll]] for (uint l = 0; l < 4; ++l) { - const uint qs = data_a[ibi].qs[4 * ib32 + l]; - const uint idxhi = bitfieldExtract(qh, 3 * int(l), 3); - const int16_t grid = int16_t(iq1s_grid[qs | (idxhi << 8)]); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const float d = float(data_a[ibi].d); + const uint qh = data_a[ibi].qh[ib32]; - [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { - vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]); - vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]); + const float dl = d * float(2 * bitfieldExtract(qh, 12, 3) + 1); + const uint qs = data_a[ibi].qs[4 * ib32 + l]; + const uint idxhi = bitfieldExtract(qh, 3 * int(l), 3); + const uint16_t grid = uint16_t(iq1s_grid[qs | (idxhi << 8)]); - FLOAT_TYPE sum = FLOAT_TYPE(0.0); - [[unroll]] for (int k = 0; k < 4; ++k) { - sum = fma(FLOAT_TYPE(b0[k]), bitfieldExtract(grid, 2 * k, 2) + delta, - fma(FLOAT_TYPE(b4[k]), bitfieldExtract(grid, 8 + 2 * k, 2) + delta, sum)); - } - temp[j][n] = fma(dl, sum, temp[j][n]); + const float delta_val = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA; + const vec4 delta_v = vec4(delta_val); + const vec4 fbits0 = vec4( + float(bitfieldExtract(grid, 0, 2)), + float(bitfieldExtract(grid, 2, 2)), + float(bitfieldExtract(grid, 4, 2)), + float(bitfieldExtract(grid, 6, 2)) + ); + const vec4 fbits1 = vec4( + float(bitfieldExtract(grid, 8, 2)), + float(bitfieldExtract(grid, 10, 2)), + float(bitfieldExtract(grid, 12, 2)), + float(bitfieldExtract(grid, 14, 2)) + ); + + vec4 sum_v = fma(b_val_0, fbits0 + delta_v, vec4(0.0)); + sum_v = fma(b_val_1, fbits1 + delta_v, sum_v); + FLOAT_TYPE sum = dot(sum_v, vec4(1.0)); + + temp[j][n] = fma(dl, sum, temp[j][n]); + ibi += num_blocks_per_row; } } - ibi += num_blocks_per_row; } } From 3238b1400cf4f0f174c39579d92ce1a8ddb68110 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sun, 14 Dec 2025 02:00:00 -0600 Subject: [PATCH 14/23] vulkan: Fix data race/hang in scalar/cm1 flash attention (#17887) --- ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp | 3 +++ ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp | 3 +++ 2 files changed, 6 insertions(+) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp index 4bef48b006..0379e5d502 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp @@ -256,6 +256,9 @@ void main() { barrier(); } + // prevent race on tmpsh + barrier(); + // reduce across threads [[unroll]] for (uint32_t r = 0; r < Br; ++r) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp index cd82e4abfa..c995ab140e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp @@ -302,6 +302,9 @@ void main() { barrier(); } + // prevent race on tmpsh + barrier(); + // reduce across threads float rowmaxf[rows_per_thread], eMf[rows_per_thread], Moldf[rows_per_thread]; From 254098a279691615438f1a1bbe87f372199e13ea Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 14 Dec 2025 10:11:13 +0200 Subject: [PATCH 15/23] common : refactor common_sampler + grammar logic changes (#17937) * common : refactor common_sampler + grammar logic changes * tests : increase max_tokens to get needed response * 
batched : fix uninitialized samplers --- common/arg.cpp | 2 +- common/common.cpp | 192 ++++++++++++------ common/common.h | 31 ++- common/sampling.cpp | 183 +++++++++-------- common/sampling.h | 17 +- common/speculative.cpp | 2 +- examples/batched/batched.cpp | 30 ++- examples/embedding/embedding.cpp | 6 +- examples/eval-callback/eval-callback.cpp | 6 +- examples/lookahead/lookahead.cpp | 6 +- examples/lookup/lookup-create.cpp | 8 +- examples/lookup/lookup-stats.cpp | 8 +- examples/lookup/lookup.cpp | 6 +- examples/parallel/parallel.cpp | 6 +- examples/retrieval/retrieval.cpp | 6 +- examples/save-load-state/save-load-state.cpp | 6 +- .../speculative-simple/speculative-simple.cpp | 12 +- examples/speculative/speculative.cpp | 16 +- examples/training/finetune.cpp | 17 +- tools/completion/completion.cpp | 20 +- tools/cvector-generator/cvector-generator.cpp | 6 +- tools/imatrix/imatrix.cpp | 6 +- tools/mtmd/mtmd-cli.cpp | 6 +- tools/perplexity/perplexity.cpp | 6 +- tools/server/server-context.cpp | 47 ++--- .../tests/unit/test_compat_anthropic.py | 2 +- tools/tts/tts.cpp | 12 +- 27 files changed, 372 insertions(+), 293 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index bb2a6840ba..aaa7b92a2e 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1415,7 +1415,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.sampling.top_k = value; params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TOP_K; } - ).set_sparam()); + ).set_sparam().set_env("LLAMA_ARG_TOP_K")); add_opt(common_arg( {"--top-p"}, "N", string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sampling.top_p), diff --git a/common/common.cpp b/common/common.cpp index 0497f90a28..b76dfa10ea 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1013,31 +1013,40 @@ bool tty_can_use_colors() { // Model utils // -static inline void common_init_sampler_from_model( +// TODO: move to common/sampling +static void common_init_sampler_from_model( const llama_model * model, common_params_sampling & sparams) { const uint64_t config = sparams.user_sampling_config; auto get_int32 = [&](const char * key, int32_t & dst, uint64_t user_config) { - if (config & user_config) return; + if (config & user_config) { + return; + } char buf[64] = {0}; if (llama_model_meta_val_str(model, key, buf, sizeof(buf)) > 0) { char * end = nullptr; int32_t v = strtol(buf, &end, 10); - if (end && end != buf) dst = v; + if (end && end != buf) { + dst = v; + } } }; auto get_float = [&](const char * key, float & dst, uint64_t user_config) { - if (config & user_config) return; + if (config & user_config) { + return; + } char buf[128] = {0}; if (llama_model_meta_val_str(model, key, buf, sizeof(buf)) > 0) { char * end = nullptr; float v = strtof(buf, &end); - if (end && end != buf) dst = v; + if (end && end != buf) { + dst = v; + } } }; @@ -1065,31 +1074,122 @@ static inline void common_init_sampler_from_model( get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_ETA), sparams.mirostat_eta, common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_ETA); } -struct common_init_result common_init_from_params(common_params & params) { - common_init_result iparams; - auto mparams = common_model_params_to_llama(params); +struct common_init_result::impl { + impl() = default; + ~impl() = default; + + llama_model_ptr model; + llama_context_ptr context; + + std::vector lora; + + std::vector samplers; +}; + 
+common_init_result::common_init_result(common_params & params) : + pimpl(new impl{}) { + const auto mparams = common_model_params_to_llama(params); llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); if (model == NULL) { - LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", - __func__, params.model.path.c_str()); - return iparams; + return; } - common_init_sampler_from_model(model, params.sampling); + pimpl->model.reset(model); const llama_vocab * vocab = llama_model_get_vocab(model); + // updates params.sampling + // TODO: fix naming + common_init_sampler_from_model(model, params.sampling); + auto cparams = common_context_params_to_llama(params); + if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) { + LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__); + params.sampling.ignore_eos = false; + } + + // initialize once + for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) { + if (llama_vocab_is_eog(vocab, i)) { + LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(vocab, i).c_str(), -INFINITY); + params.sampling.logit_bias_eog.push_back({i, -INFINITY}); + } + } + + if (params.sampling.ignore_eos) { + // add EOG biases to the active set of logit biases + params.sampling.logit_bias.insert( + params.sampling.logit_bias.end(), + params.sampling.logit_bias_eog.begin(), params.sampling.logit_bias_eog.end()); + } + + //if (params.sampling.penalty_last_n == -1) { + // LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); + // params.sampling.penalty_last_n = llama_n_ctx(lctx); + //} + + //if (params.sampling.dry_penalty_last_n == -1) { + // LOG_INF("%s: setting dry_penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); + // params.sampling.dry_penalty_last_n = llama_n_ctx(lctx); + //} + + pimpl->samplers.resize(cparams.n_seq_max); + + for (int i = 0; i < (int) cparams.n_seq_max; ++i) { + pimpl->samplers[i].reset(common_sampler_init(model, params.sampling)); + } + llama_context * lctx = llama_init_from_model(model, cparams); if (lctx == NULL) { LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", - __func__, params.model.path.c_str()); - llama_model_free(model); - return iparams; + __func__, params.model.path.c_str()); + return; } + pimpl->context.reset(lctx); +} + +llama_model * common_init_result::model() { + return pimpl->model.get(); +} + +llama_context * common_init_result::context() { + return pimpl->context.get(); +} + +common_sampler * common_init_result::sampler(llama_seq_id seq_id) { + return pimpl->samplers[seq_id].get(); +} + +std::vector & common_init_result::lora() { + return pimpl->lora; +} + +void common_init_result::free_context() { + pimpl->context.reset(); +} + +common_init_result_ptr common_init_from_params(common_params & params) { + common_init_result_ptr res(new common_init_result(params)); + + llama_model * model = res->model(); + if (model == NULL) { + LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", + __func__, params.model.path.c_str()); + return res; + } + + llama_context * lctx = res->context(); + if (lctx == NULL) { + LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", + __func__, params.model.path.c_str()); + return res; + } + + const llama_vocab * vocab = 
llama_model_get_vocab(model); + if (params.ctx_shift && !llama_memory_can_shift(llama_get_memory(lctx))) { LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__); params.ctx_shift = false; @@ -1101,10 +1201,7 @@ struct common_init_result common_init_from_params(common_params & params) { const auto cvec = common_control_vector_load(params.control_vectors); if (cvec.n_embd == -1) { - llama_free(lctx); - llama_model_free(model); - - return iparams; + return res; } int err = llama_apply_adapter_cvec( @@ -1115,10 +1212,7 @@ struct common_init_result common_init_from_params(common_params & params) { params.control_vector_layer_start, params.control_vector_layer_end); if (err) { - llama_free(lctx); - llama_model_free(model); - - return iparams; + return res; } } @@ -1142,10 +1236,7 @@ struct common_init_result common_init_from_params(common_params & params) { } if (!ok) { - llama_free(lctx); - llama_model_free(model); - - return iparams; + return res; } } @@ -1155,9 +1246,7 @@ struct common_init_result common_init_from_params(common_params & params) { lora.reset(llama_adapter_lora_init(model, la.path.c_str())); if (lora == nullptr) { LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str()); - llama_free(lctx); - llama_model_free(model); - return iparams; + return res; } char buf[1024]; @@ -1166,43 +1255,13 @@ struct common_init_result common_init_from_params(common_params & params) { la.task_name = buf; llama_adapter_meta_val_str(la.ptr, "adapter.lora.prompt_prefix", buf, sizeof(buf)); la.prompt_prefix = buf; - iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters + res->lora().emplace_back(std::move(lora)); // copy to list of loaded adapters } if (!params.lora_init_without_apply) { common_set_adapter_lora(lctx, params.lora_adapters); } - if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) { - LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__); - params.sampling.ignore_eos = false; - } - - // initialize once - for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) { - if (llama_vocab_is_eog(vocab, i)) { - LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY); - params.sampling.logit_bias_eog.push_back({i, -INFINITY}); - } - } - - if (params.sampling.ignore_eos) { - // add EOG biases to the active set of logit biases - params.sampling.logit_bias.insert( - params.sampling.logit_bias.end(), - params.sampling.logit_bias_eog.begin(), params.sampling.logit_bias_eog.end()); - } - - if (params.sampling.penalty_last_n == -1) { - LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); - params.sampling.penalty_last_n = llama_n_ctx(lctx); - } - - if (params.sampling.dry_penalty_last_n == -1) { - LOG_INF("%s: setting dry_penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); - params.sampling.dry_penalty_last_n = llama_n_ctx(lctx); - } - if (params.warmup) { LOG_WRN("%s: warming up the model with an empty run - please wait ... 
(--no-warmup to disable)\n", __func__); @@ -1241,12 +1300,11 @@ struct common_init_result common_init_from_params(common_params & params) { llama_set_warmup(lctx, false); } - iparams.model.reset(model); - iparams.context.reset(lctx); - - return iparams; + return res; } +common_init_result::~common_init_result() = default; + std::string get_model_endpoint() { const char * model_endpoint_env = getenv("MODEL_ENDPOINT"); // We still respect the use of environment-variable "HF_ENDPOINT" for backward-compatibility. @@ -1255,7 +1313,9 @@ std::string get_model_endpoint() { std::string model_endpoint = "https://huggingface.co/"; if (endpoint_env) { model_endpoint = endpoint_env; - if (model_endpoint.back() != '/') model_endpoint += '/'; + if (model_endpoint.back() != '/') { + model_endpoint += '/'; + } } return model_endpoint; } diff --git a/common/common.h b/common/common.h index 2fd83f0cf9..4edb74b706 100644 --- a/common/common.h +++ b/common/common.h @@ -195,7 +195,6 @@ struct common_params_sampling { std::vector dry_sequence_breakers = {"\n", ":", "\"", "*"}; // default sequence breakers for DRY - std::vector samplers = { COMMON_SAMPLER_TYPE_PENALTIES, COMMON_SAMPLER_TYPE_DRY, @@ -216,6 +215,10 @@ struct common_params_sampling { std::vector logit_bias; // logit biases to apply std::vector logit_bias_eog; // pre-calculated logit biases for EOG tokens + bool has_logit_bias() const { + return !logit_bias.empty(); + } + // print the parameters into a string std::string print() const; }; @@ -669,15 +672,29 @@ bool tty_can_use_colors(); // Model utils // -// note: defines object's lifetime -struct common_init_result { - llama_model_ptr model; - llama_context_ptr context; +struct common_sampler; - std::vector lora; +// note: defines the model, context, samplers, ets. 
lifetimes +struct common_init_result { + common_init_result(common_params & params); + ~common_init_result(); + + llama_model * model(); + llama_context * context(); + common_sampler * sampler(llama_seq_id seq_id); + + std::vector & lora(); + + void free_context(); + +private: + struct impl; + std::unique_ptr pimpl; }; -struct common_init_result common_init_from_params(common_params & params); +using common_init_result_ptr = std::unique_ptr; + +common_init_result_ptr common_init_from_params(common_params & params); struct llama_model_params common_model_params_to_llama ( common_params & params); struct llama_context_params common_context_params_to_llama(const common_params & params); diff --git a/common/sampling.cpp b/common/sampling.cpp index 7a6b7be1e0..6935d84e22 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -104,9 +104,10 @@ struct ring_buffer { struct common_sampler { common_params_sampling params; - struct llama_sampler * grmr; struct llama_sampler * chain; + bool grammar; + ring_buffer prev; std::vector cur; @@ -116,7 +117,6 @@ struct common_sampler { void reset() { prev.clear(); - llama_sampler_reset(grmr); llama_sampler_reset(chain); } @@ -167,10 +167,15 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co lparams.no_perf = params.no_perf; - struct llama_sampler * grmr; + llama_sampler * chain = llama_sampler_chain_init(lparams); + + bool grammar = false; + std::vector samplers; + if (params.grammar.compare(0, 11, "%llguidance") == 0) { #ifdef LLAMA_USE_LLGUIDANCE - grmr = llama_sampler_init_llg(vocab, "lark", params.grammar.c_str()); + samplers.push_back(llama_sampler_init_llg(vocab, "lark", params.grammar.c_str())); + grammar = true; #else GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled"); #endif // LLAMA_USE_LLGUIDANCE @@ -217,30 +222,23 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co trigger_patterns_c.push_back(regex.c_str()); } - grmr = params.grammar_lazy - ? 
llama_sampler_init_grammar_lazy_patterns(vocab, params.grammar.c_str(), "root", - trigger_patterns_c.data(), trigger_patterns_c.size(), - trigger_tokens.data(), trigger_tokens.size()) - : llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root"); - if (!grmr) { - return nullptr; + if (!params.grammar.empty()) { + if (params.grammar_lazy) { + samplers.push_back( + llama_sampler_init_grammar_lazy_patterns(vocab, params.grammar.c_str(), "root", + trigger_patterns_c.data(), trigger_patterns_c.size(), + trigger_tokens.data(), trigger_tokens.size())); + } else { + samplers.push_back(llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root")); + } + + grammar = true; } } - auto * result = new common_sampler { - /* .params = */ params, - /* .grmr = */ grmr, - /* .chain = */ llama_sampler_chain_init(lparams), - /* .prev = */ ring_buffer(std::max(32, params.n_prev)), - /* .cur = */ {}, - /* .cur_p = */ {}, - }; - - llama_sampler_chain_add(result->chain, - llama_sampler_init_logit_bias( - llama_vocab_n_tokens(vocab), - params.logit_bias.size(), - params.logit_bias.data())); + if (params.has_logit_bias()) { + samplers.push_back(llama_sampler_init_logit_bias(llama_vocab_n_tokens(vocab), params.logit_bias.size(), params.logit_bias.data())); + } if (params.mirostat == 0) { for (const auto & cnstr : params.samplers) { @@ -253,58 +251,70 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co c_breakers.push_back(str.c_str()); } - llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size())); + samplers.push_back(llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size())); } break; case COMMON_SAMPLER_TYPE_TOP_K: - llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k)); + samplers.push_back(llama_sampler_init_top_k (params.top_k)); break; case COMMON_SAMPLER_TYPE_TOP_P: - llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep)); + samplers.push_back(llama_sampler_init_top_p (params.top_p, params.min_keep)); break; case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: - llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma)); + samplers.push_back(llama_sampler_init_top_n_sigma(params.top_n_sigma)); break; case COMMON_SAMPLER_TYPE_MIN_P: - llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep)); + samplers.push_back(llama_sampler_init_min_p (params.min_p, params.min_keep)); break; case COMMON_SAMPLER_TYPE_XTC: - llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed)); + samplers.push_back(llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed)); break; case COMMON_SAMPLER_TYPE_TYPICAL_P: - llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep)); + samplers.push_back(llama_sampler_init_typical (params.typ_p, params.min_keep)); break; case COMMON_SAMPLER_TYPE_TEMPERATURE: - llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent)); + samplers.push_back(llama_sampler_init_temp_ext (params.temp, 
params.dynatemp_range, params.dynatemp_exponent)); break; case COMMON_SAMPLER_TYPE_INFILL: - llama_sampler_chain_add(result->chain, llama_sampler_init_infill (vocab)); + samplers.push_back(llama_sampler_init_infill (vocab)); break; case COMMON_SAMPLER_TYPE_PENALTIES: - llama_sampler_chain_add(result->chain, llama_sampler_init_penalties (params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present)); + samplers.push_back(llama_sampler_init_penalties (params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present)); break; default: GGML_ASSERT(false && "unknown sampler type"); } } - llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed)); + + samplers.push_back(llama_sampler_init_dist(params.seed)); } else if (params.mirostat == 1) { - llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); - llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), params.seed, params.mirostat_tau, params.mirostat_eta, 100)); + samplers.push_back(llama_sampler_init_temp(params.temp)); + samplers.push_back(llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), params.seed, params.mirostat_tau, params.mirostat_eta, 100)); } else if (params.mirostat == 2) { - llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); - llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta)); + samplers.push_back(llama_sampler_init_temp(params.temp)); + samplers.push_back(llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta)); } else { GGML_ASSERT(false && "unknown mirostat version"); } + for (auto * smpl : samplers) { + llama_sampler_chain_add(chain, smpl); + } + + auto * result = new common_sampler { + /* .params = */ params, + /* .chain = */ chain, + /* .grammar = */ grammar, + /* .prev = */ ring_buffer(std::max(32, params.n_prev)), + /* .cur = */ {}, + /* .cur_p = */ {}, + }; + return result; } void common_sampler_free(struct common_sampler * gsmpl) { if (gsmpl) { - llama_sampler_free(gsmpl->grmr); - llama_sampler_free(gsmpl->chain); delete gsmpl; @@ -314,11 +324,24 @@ void common_sampler_free(struct common_sampler * gsmpl) { void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar) { const auto tm = gsmpl->tm(); - if (accept_grammar) { - llama_sampler_accept(gsmpl->grmr, token); - } + if (gsmpl->grammar) { + const int n_smpl = llama_sampler_chain_n(gsmpl->chain); - llama_sampler_accept(gsmpl->chain, token); + for (int i = 0; i < n_smpl; i++) { + auto * smpl = llama_sampler_chain_get(gsmpl->chain, i); + + // the grammar sampler is always the first one + if (i == 0) { + if (accept_grammar) { + llama_sampler_accept(smpl, token); + } + } else { + llama_sampler_accept(smpl, token); + } + } + } else { + llama_sampler_accept(gsmpl->chain, token); + } gsmpl->prev.push_back(token); } @@ -329,12 +352,12 @@ void common_sampler_reset(struct common_sampler * gsmpl) { struct common_sampler * common_sampler_clone(common_sampler * gsmpl) { return new common_sampler { - /* .params = */ gsmpl->params, - /* .grmr = */ llama_sampler_clone(gsmpl->grmr), - /* .chain = */ llama_sampler_clone(gsmpl->chain), - /* .prev = */ gsmpl->prev, - /* .cur = */ gsmpl->cur, - /* .cur_p = */ gsmpl->cur_p, + /* .params = */ gsmpl->params, + /* .chain = */ llama_sampler_clone(gsmpl->chain), + /* .grammar = */ gsmpl->grammar, + /* .prev = */ gsmpl->prev, + /* .cur = 
*/ gsmpl->cur, + /* .cur_p = */ gsmpl->cur_p, }; } @@ -383,58 +406,33 @@ void common_perf_print(const struct llama_context * ctx, const struct common_sam } } -llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) { +struct llama_sampler * common_sampler_get(const struct common_sampler * gsmpl) { + return gsmpl->chain; +} + +llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx) { llama_synchronize(ctx); // start measuring sampling time after the llama_context synchronization in order to not measure any ongoing async operations const auto tm = gsmpl->tm(); - gsmpl->set_logits(ctx, idx); + llama_token id = LLAMA_TOKEN_NULL; - auto & grmr = gsmpl->grmr; auto & chain = gsmpl->chain; auto & cur_p = gsmpl->cur_p; // initialized by set_logits - if (grammar_first) { - llama_sampler_apply(grmr, &cur_p); - } + gsmpl->set_logits(ctx, idx); llama_sampler_apply(chain, &cur_p); GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration"); - const llama_token id = cur_p.data[cur_p.selected].id; + id = cur_p.data[cur_p.selected].id; - if (grammar_first) { - return id; - } - - // check if it the sampled token fits the grammar - { - llama_token_data single_token_data = { id, 1.0f, 0.0f }; - llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false }; - - llama_sampler_apply(grmr, &single_token_data_array); - - const bool is_valid = single_token_data_array.data[0].logit != -INFINITY; - if (is_valid) { - return id; - } - } - - // resampling: - // if the token is not valid, sample again, but first apply the grammar sampler and then the sampling chain - gsmpl->set_logits(ctx, idx); - - llama_sampler_apply(grmr, &cur_p); - llama_sampler_apply(chain, &cur_p); - - GGML_ASSERT(cur_p.selected != -1 && "no selected token during re-sampling - check your sampling configuration"); - - return cur_p.data[cur_p.selected].id; + return id; } -std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector & idxs, const llama_tokens & draft, bool grammar_first) { +std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector & idxs, const llama_tokens & draft) { GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1"); std::vector result; @@ -442,7 +440,7 @@ std::vector common_sampler_sample_and_accept_n(struct common_sample size_t i = 0; for (; i < draft.size(); i++) { - const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first); + const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i]); common_sampler_accept(gsmpl, id, true); @@ -454,7 +452,7 @@ std::vector common_sampler_sample_and_accept_n(struct common_sample } if (i == draft.size()) { - const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first); + const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i]); common_sampler_accept(gsmpl, id, true); @@ -464,13 +462,13 @@ std::vector common_sampler_sample_and_accept_n(struct common_sample return result; } -std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first) { +std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft) { std::vector idxs(draft.size() + 1); 
for (size_t i = 0; i < idxs.size(); ++i) { idxs[i] = i; } - return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft, grammar_first); + return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft); } uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) { @@ -515,7 +513,8 @@ std::string common_sampler_print(const struct common_sampler * gsmpl) { for (int i = 0; i < llama_sampler_chain_n(gsmpl->chain); i++) { const auto * smpl = llama_sampler_chain_get(gsmpl->chain, i); - result += std::string("-> ") + llama_sampler_name(smpl) + " "; + result += std::string("-> "); + result += std::string(llama_sampler_name(smpl)) + " "; } return result; diff --git a/common/sampling.h b/common/sampling.h index e198eecda3..ace5d3d020 100644 --- a/common/sampling.h +++ b/common/sampling.h @@ -48,6 +48,8 @@ struct common_sampler * common_sampler_clone (struct common_sampler * gsmpl); // arguments can be nullptr to skip printing void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl); +struct llama_sampler * common_sampler_get(const struct common_sampler * gsmpl); + // extended sampling implementation: // // - set logits @@ -55,10 +57,7 @@ void common_perf_print(const struct llama_context * ctx, const struct common_sam // - check if the token fits the grammar (if any) // - if not: resample by first applying the grammar constraints and then sampling again (slower path) // -// if grammar_first is true, the grammar is applied before the samplers (slower) -// useful in cases where all the resulting candidates (not just the sampled one) must fit the grammar -// -llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first = false); +llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx); // generalized version of common_sampler_sample // @@ -76,10 +75,10 @@ llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_co // // returns at least 1 token, up to idxs.size() // -std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector & idxs, const llama_tokens & draft, bool grammar_first = false); +std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector & idxs, const llama_tokens & draft); // assume idxs == [ 0, 1, 2, ..., draft.size() ] -std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first = false); +std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft); uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl); @@ -107,3 +106,9 @@ std::vector common_sampler_types_from_chars(const std: llama_sampler * llama_sampler_init_llg(const llama_vocab * vocab, const char * grammar_kind, const char * grammar_data); + +struct common_sampler_deleter { + void operator()(common_sampler * s) { common_sampler_free(s); } +}; + +typedef std::unique_ptr common_sampler_ptr; diff --git a/common/speculative.cpp b/common/speculative.cpp index 3e83b0964c..1e12383ae6 100644 --- a/common/speculative.cpp +++ b/common/speculative.cpp @@ -315,7 +315,7 @@ llama_tokens common_speculative_gen_draft( for (int i = 0; i < params.n_draft; ++i) { common_batch_clear(batch); - common_sampler_sample(smpl, ctx_dft, 0, true); + 
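
The refactor above settles on a single sampler chain with the grammar sampler, when present, always at index 0; common_sampler_accept relies on that invariant to skip grammar acceptance selectively. A minimal sketch of that layout using only the public llama.h sampler API follows (the helper name and the hard-coded top-k/temperature values are illustrative, not part of the patch):

    #include "llama.h"

    // build a chain whose first entry is the grammar sampler, mirroring
    // the index-0 invariant that common_sampler_accept depends on
    static llama_sampler * make_grammar_chain(const llama_vocab * vocab, const char * grammar_str) {
        llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());
        if (grammar_str != nullptr) {
            llama_sampler_chain_add(chain, llama_sampler_init_grammar(vocab, grammar_str, "root"));
        }
        llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
        llama_sampler_chain_add(chain, llama_sampler_init_temp (0.80f));
        llama_sampler_chain_add(chain, llama_sampler_init_dist (LLAMA_DEFAULT_SEED));
        return chain; // caller frees with llama_sampler_free
    }

With the grammar constraining candidates in the same pass as the rest of the chain, the old grammar_first/resampling branch in common_sampler_sample becomes unnecessary, which is why it is removed here.
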
common_sampler_sample(smpl, ctx_dft, 0); const auto * cur_p = common_sampler_get_candidates(smpl, true); diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp index 1a5de5928a..36a12d299f 100644 --- a/examples/batched/batched.cpp +++ b/examples/batched/batched.cpp @@ -2,6 +2,7 @@ #include "common.h" #include "log.h" #include "llama.h" +#include "sampling.h" #include #include @@ -64,17 +65,23 @@ int main(int argc, char ** argv) { ctx_params.n_ctx = n_kv_req; ctx_params.n_batch = std::max(n_predict, n_parallel); - llama_context * ctx = llama_init_from_model(model, ctx_params); - auto sparams = llama_sampler_chain_default_params(); sparams.no_perf = false; - llama_sampler * smpl = llama_sampler_chain_init(sparams); + std::vector samplers; - llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k)); - llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep)); - llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp)); - llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed)); + for (int32_t i = 0; i < n_parallel; ++i) { + llama_sampler * smpl = llama_sampler_chain_init(sparams); + + llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k)); + llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep)); + llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp)); + llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed)); + + samplers.push_back(smpl); + } + + llama_context * ctx = llama_init_from_model(model, ctx_params); if (ctx == NULL) { LOG_ERR("%s: error: failed to create the llama_context\n" , __func__); @@ -173,7 +180,7 @@ int main(int argc, char ** argv) { continue; } - const llama_token new_token_id = llama_sampler_sample(smpl, ctx, i_batch[i]); + const llama_token new_token_id = llama_sampler_sample(samplers[i], ctx, i_batch[i]); // is it an end of generation? 
-> mark the stream as finished if (llama_vocab_is_eog(vocab, new_token_id) || n_cur == n_predict) { @@ -229,14 +236,17 @@ int main(int argc, char ** argv) { __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f)); LOG("\n"); - llama_perf_sampler_print(smpl); + llama_perf_sampler_print(samplers[0]); llama_perf_context_print(ctx); fprintf(stderr, "\n"); llama_batch_free(batch); - llama_sampler_free(smpl); + for (auto & sampler_config : samplers) { + llama_sampler_free(sampler_config); + } + llama_free(ctx); llama_model_free(model); diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index fe91b308cd..81111e81b2 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -131,10 +131,10 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); // load the model - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); diff --git a/examples/eval-callback/eval-callback.cpp b/examples/eval-callback/eval-callback.cpp index 80c693ce61..408338f1af 100644 --- a/examples/eval-callback/eval-callback.cpp +++ b/examples/eval-callback/eval-callback.cpp @@ -202,10 +202,10 @@ int main(int argc, char ** argv) { params.warmup = false; // init - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); if (model == nullptr || ctx == nullptr) { LOG_ERR("%s : failed to init\n", __func__); diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp index 1e26d8221b..f54cfdd77f 100644 --- a/examples/lookahead/lookahead.cpp +++ b/examples/lookahead/lookahead.cpp @@ -55,10 +55,10 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); // load the target model - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); auto * mem = llama_get_memory(ctx); diff --git a/examples/lookup/lookup-create.cpp b/examples/lookup/lookup-create.cpp index 3da45ed9e0..bb94a8fe06 100644 --- a/examples/lookup/lookup-create.cpp +++ b/examples/lookup/lookup-create.cpp @@ -18,16 +18,16 @@ int main(int argc, char ** argv){ llama_numa_init(params.numa); // load the model - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model_ptr & model = llama_init.model; - llama_context_ptr & ctx = llama_init.context; + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); GGML_ASSERT(model != nullptr); // tokenize the prompt std::vector inp; - inp = common_tokenize(ctx.get(), params.prompt, true, true); + inp = common_tokenize(ctx, params.prompt, true, true); fprintf(stderr, "%s: tokenization done\n", __func__); common_ngram_cache ngram_cache; diff --git a/examples/lookup/lookup-stats.cpp b/examples/lookup/lookup-stats.cpp index 
fcb289abe0..135f6fcab9 100644 --- a/examples/lookup/lookup-stats.cpp +++ b/examples/lookup/lookup-stats.cpp @@ -28,13 +28,13 @@ int main(int argc, char ** argv){ llama_numa_init(params.numa); // load the model - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_context_ptr & ctx = llama_init.context; + llama_context * ctx = llama_init->context(); // tokenize the prompt std::vector inp; - inp = common_tokenize(ctx.get(), params.prompt, true, true); + inp = common_tokenize(ctx, params.prompt, true, true); common_ngram_cache ngram_cache_context; common_ngram_cache ngram_cache_dynamic; @@ -65,7 +65,7 @@ int main(int argc, char ** argv){ } const int n_input = inp.size(); - const int n_ctx = llama_n_ctx(ctx.get()); + const int n_ctx = llama_n_ctx(ctx); int n_drafted = 0; int n_accept = 0; diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp index 2bfa26b55f..27f159940a 100644 --- a/examples/lookup/lookup.cpp +++ b/examples/lookup/lookup.cpp @@ -29,10 +29,10 @@ int main(int argc, char ** argv){ llama_numa_init(params.numa); // load the model - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); const llama_vocab * vocab = llama_model_get_vocab(model); diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index e48f48fc32..c92173ae29 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -192,10 +192,10 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); // load the target model - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); auto * mem = llama_get_memory(ctx); diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp index 042e12c2bf..2c2143ad10 100644 --- a/examples/retrieval/retrieval.cpp +++ b/examples/retrieval/retrieval.cpp @@ -149,10 +149,10 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); // load the model - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index 4cd3071f76..39d4464663 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -34,10 +34,10 @@ int main(int argc, char ** argv) { std::string result2; // init - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); if (model == nullptr || ctx == nullptr) { fprintf(stderr, "%s : failed to init\n", __func__); diff --git 
a/examples/speculative-simple/speculative-simple.cpp b/examples/speculative-simple/speculative-simple.cpp index 0d11d0f803..8141052a22 100644 --- a/examples/speculative-simple/speculative-simple.cpp +++ b/examples/speculative-simple/speculative-simple.cpp @@ -40,10 +40,10 @@ int main(int argc, char ** argv) { llama_context * ctx_dft = NULL; // load the target model - common_init_result llama_init_tgt = common_init_from_params(params); + auto llama_init_tgt = common_init_from_params(params); - model_tgt = llama_init_tgt.model.get(); - ctx_tgt = llama_init_tgt.context.get(); + model_tgt = llama_init_tgt->model(); + ctx_tgt = llama_init_tgt->context(); const llama_vocab * vocab = llama_model_get_vocab(model_tgt); @@ -61,10 +61,10 @@ int main(int argc, char ** argv) { params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads; params.tensor_buft_overrides = params.speculative.tensor_buft_overrides; - common_init_result llama_init_dft = common_init_from_params(params); + auto llama_init_dft = common_init_from_params(params); - //model_dft = llama_init_dft.model.get(); - ctx_dft = llama_init_dft.context.get(); + //model_dft = llama_init_dft->model(); + ctx_dft = llama_init_dft->context(); if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) { LOG_INF("the draft model '%s' is not compatible with the target model '%s'. tokens will be translated between the draft and target models.\n", params.speculative.model.path.c_str(), params.model.path.c_str()); diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index 5f5ac5eb64..2fb7f6374e 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -71,10 +71,10 @@ int main(int argc, char ** argv) { llama_context * ctx_dft = NULL; // load the target model - common_init_result llama_init_tgt = common_init_from_params(params); + auto llama_init_tgt = common_init_from_params(params); - model_tgt = llama_init_tgt.model.get(); - ctx_tgt = llama_init_tgt.context.get(); + model_tgt = llama_init_tgt->model(); + ctx_tgt = llama_init_tgt->context(); // load the draft model params.devices = params.speculative.devices; @@ -87,10 +87,10 @@ int main(int argc, char ** argv) { params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads; params.tensor_buft_overrides = params.speculative.tensor_buft_overrides; - common_init_result llama_init_dft = common_init_from_params(params); + auto llama_init_dft = common_init_from_params(params); - model_dft = llama_init_dft.model.get(); - ctx_dft = llama_init_dft.context.get(); + model_dft = llama_init_dft->model(); + ctx_dft = llama_init_dft->context(); const llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt); const llama_vocab * vocab_dft = llama_model_get_vocab(model_dft); @@ -242,7 +242,7 @@ int main(int argc, char ** argv) { bool accept = false; if (params.sampling.temp > 0) { // stochastic verification - common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true); + common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft]); auto & dist_tgt = *common_sampler_get_candidates(smpl, true); @@ -491,7 +491,7 @@ int main(int argc, char ** argv) { continue; } - common_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft, true); + common_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft); const auto * cur_p = common_sampler_get_candidates(drafts[s].smpl, true); diff --git a/examples/training/finetune.cpp b/examples/training/finetune.cpp index 
416d8d8f6c..c82de8d35d 100644 --- a/examples/training/finetune.cpp +++ b/examples/training/finetune.cpp @@ -39,9 +39,10 @@ int main(int argc, char ** argv) { llama_backend_init(); llama_numa_init(params.numa); // load the model and apply lora adapter, if any - common_init_result llama_init = common_init_from_params(params); - llama_model_ptr & model = llama_init.model; - llama_context_ptr & ctx = llama_init.context; + auto llama_init = common_init_from_params(params); + + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); @@ -54,8 +55,8 @@ int main(int argc, char ** argv) { LOG_INF("%s\n", common_params_get_system_info(params).c_str()); } - std::vector tokens = common_tokenize(ctx.get(), params.prompt, true); - ggml_opt_dataset_t dataset = common_opt_dataset_init(ctx.get(), tokens, llama_n_ctx(ctx.get()) / 2); + std::vector tokens = common_tokenize(ctx, params.prompt, true); + ggml_opt_dataset_t dataset = common_opt_dataset_init(ctx, tokens, llama_n_ctx(ctx) / 2); struct lr_opt & lr = params.lr; LOG_INF("-optimizer %s -lr0 %.2g -wd %.2g -lr-min %.2g -min-epochs %.2g -epochs %d -period %.2g -val %.2g\n", @@ -70,7 +71,7 @@ int main(int argc, char ** argv) { /*get_opt_pars_ud =*/¶ms.lr, /*optimizer_type =*/params.optimizer, }; - llama_opt_init(ctx.get(), model.get(), lopt_params); + llama_opt_init(ctx, model, lopt_params); const int64_t idata_split = ggml_opt_dataset_ndata(dataset) * (1.0f - params.val_split); @@ -78,7 +79,7 @@ int main(int argc, char ** argv) { ggml_opt_result_t result_eval = ggml_opt_result_init(); for (lr.epoch = 0; lr.epoch < lr.epochs; ++lr.epoch) { - llama_opt_epoch(ctx.get(), dataset, result_train, result_eval, idata_split, + llama_opt_epoch(ctx, dataset, result_train, result_eval, idata_split, ggml_opt_epoch_callback_progress_bar, ggml_opt_epoch_callback_progress_bar); fprintf(stderr, "\n"); @@ -88,7 +89,7 @@ int main(int argc, char ** argv) { ggml_opt_result_free(result_train); ggml_opt_result_free(result_eval); - llama_model_save_to_file(model.get(), params.out_file.c_str()); + llama_model_save_to_file(model, params.out_file.c_str()); llama_backend_free(); diff --git a/tools/completion/completion.cpp b/tools/completion/completion.cpp index cb2641ae0a..85480f3369 100644 --- a/tools/completion/completion.cpp +++ b/tools/completion/completion.cpp @@ -141,13 +141,15 @@ int main(int argc, char ** argv) { // load the model and apply lora adapter, if any LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__); - common_init_result llama_init = common_init_from_params(params); - model = llama_init.model.get(); - ctx = llama_init.context.get(); + auto llama_init = common_init_from_params(params); - if (model == NULL) { - LOG_ERR("%s: error: unable to load model\n", __func__); + ctx = llama_init->context(); + model = llama_init->model(); + smpl = llama_init->sampler(0); + + if (ctx == NULL) { + LOG_ERR("%s: error: unable to create context\n", __func__); return 1; } @@ -474,12 +476,6 @@ int main(int argc, char ** argv) { } } - smpl = common_sampler_init(model, sparams); - if (!smpl) { - LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__); - return 1; - } - LOG_INF("sampler seed: %u\n", common_sampler_get_seed(smpl)); LOG_INF("sampler params: \n%s\n", sparams.print().c_str()); LOG_INF("sampler chain: %s\n", common_sampler_print(smpl).c_str()); @@ -993,8 +989,6 @@ int main(int argc, char ** argv) { LOG("\n\n"); common_perf_print(ctx, smpl); - 
common_sampler_free(smpl); - llama_backend_free(); ggml_threadpool_free_fn(threadpool); diff --git a/tools/cvector-generator/cvector-generator.cpp b/tools/cvector-generator/cvector-generator.cpp index d2d97e05ce..3ba7c52950 100644 --- a/tools/cvector-generator/cvector-generator.cpp +++ b/tools/cvector-generator/cvector-generator.cpp @@ -419,10 +419,10 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); // load the model to get hparams - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); // int n_ctx = llama_n_ctx(ctx); int n_layers = llama_model_n_layer(model); diff --git a/tools/imatrix/imatrix.cpp b/tools/imatrix/imatrix.cpp index f28a036dee..669de55ddb 100644 --- a/tools/imatrix/imatrix.cpp +++ b/tools/imatrix/imatrix.cpp @@ -1265,10 +1265,10 @@ int main(int argc, char ** argv) { params.warmup = false; // init - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); if (model == nullptr || ctx == nullptr) { LOG_ERR("%s : failed to init\n", __func__); diff --git a/tools/mtmd/mtmd-cli.cpp b/tools/mtmd/mtmd-cli.cpp index 25d24603db..332d2049e5 100644 --- a/tools/mtmd/mtmd-cli.cpp +++ b/tools/mtmd/mtmd-cli.cpp @@ -65,7 +65,7 @@ static void sigint_handler(int signo) { struct mtmd_cli_context { mtmd::context_ptr ctx_vision; - common_init_result llama_init; + common_init_result_ptr llama_init; llama_model * model; llama_context * lctx; @@ -89,8 +89,8 @@ struct mtmd_cli_context { llama_pos n_past = 0; mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) { - model = llama_init.model.get(); - lctx = llama_init.context.get(); + model = llama_init->model(); + lctx = llama_init->context(); vocab = llama_model_get_vocab(model); smpl = common_sampler_init(model, params.sampling); n_threads = params.cpuparams.n_threads; diff --git a/tools/perplexity/perplexity.cpp b/tools/perplexity/perplexity.cpp index caf080e8d1..1ead9c871e 100644 --- a/tools/perplexity/perplexity.cpp +++ b/tools/perplexity/perplexity.cpp @@ -2024,10 +2024,10 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); // load the model and apply lora adapter, if any - common_init_result llama_init = common_init_from_params(params); + auto llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + auto * model = llama_init->model(); + auto * ctx = llama_init->context(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); diff --git a/tools/server/server-context.cpp b/tools/server/server-context.cpp index 5a67f508df..90898b5ec4 100644 --- a/tools/server/server-context.cpp +++ b/tools/server/server-context.cpp @@ -153,7 +153,7 @@ struct server_slot { // sampling json json_schema; - struct common_sampler * smpl = nullptr; + common_sampler_ptr smpl; llama_token sampled; // in speculative mode, this is the last accepted token llama_tokens drafted; @@ -510,8 +510,8 @@ struct server_context_impl { common_params params_base; // note: keep these alive - they determine the lifetime of the model, context, etc. 
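
The "keep these alive" note is the crux of the common_init_result_ptr migration: call sites now hold one owning handle and read raw pointers from it. As an approximation of the shape involved (the real type lives in common/common.h and its exact layout is not shown in this patch), a minimal RAII sketch:

    #include "llama.h"

    #include <memory>

    // owning smart pointers behind raw accessors, so call sites can write
    // llama_init->model() / llama_init->context()
    struct init_result_sketch {
        std::unique_ptr<llama_model,   decltype(&llama_model_free)> model_ { nullptr, &llama_model_free };
        std::unique_ptr<llama_context, decltype(&llama_free)>       ctx_   { nullptr, &llama_free };

        llama_model   * model()   const { return model_.get(); }
        llama_context * context() const { return ctx_.get(); }

        // counterpart of llama_init_dft->free_context(): drop the context
        // early while the model stays alive (per-slot contexts replace it)
        void free_context() { ctx_.reset(); }
    };
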
- common_init_result llama_init; - common_init_result llama_init_dft; + common_init_result_ptr llama_init; + common_init_result_ptr llama_init_dft; llama_model * model = nullptr; llama_context * ctx = nullptr; @@ -557,9 +557,6 @@ struct server_context_impl { // Clear any sampling context for (server_slot & slot : slots) { - common_sampler_free(slot.smpl); - slot.smpl = nullptr; - llama_free(slot.ctx_dft); slot.ctx_dft = nullptr; @@ -580,8 +577,8 @@ struct server_context_impl { llama_init = common_init_from_params(params_base); - model = llama_init.model.get(); - ctx = llama_init.context.get(); + model = llama_init->model(); + ctx = llama_init->context(); if (model == nullptr) { SRV_ERR("failed to load model, '%s'\n", params_base.model.path.c_str()); @@ -613,25 +610,25 @@ struct server_context_impl { llama_init_dft = common_init_from_params(params_dft); - model_dft = llama_init_dft.model.get(); + model_dft = llama_init_dft->model(); if (model_dft == nullptr) { SRV_ERR("failed to load draft model, '%s'\n", params_base.speculative.model.path.c_str()); return false; } - vocab_dft_compatible = common_speculative_are_compatible(ctx, llama_init_dft.context.get()); + vocab_dft_compatible = common_speculative_are_compatible(ctx, llama_init_dft->context()); if (!vocab_dft_compatible) { SRV_INF("the draft model '%s' is not compatible with the target model '%s'. tokens will be translated between the draft and target models.\n", params_base.speculative.model.path.c_str(), params_base.model.path.c_str()); } - const int n_ctx_dft = llama_n_ctx(llama_init_dft.context.get()); + const int n_ctx_dft = llama_n_ctx(llama_init_dft->context()); cparams_dft = common_context_params_to_llama(params_dft); cparams_dft.n_batch = n_ctx_dft; // the context is not needed - we will create one for each slot - llama_init_dft.context.reset(); + llama_init_dft->free_context(); } chat_templates = common_chat_templates_init(model, params_base.chat_template); @@ -1051,18 +1048,15 @@ struct server_context_impl { // initialize samplers { - if (slot.smpl != nullptr) { - common_sampler_free(slot.smpl); - } + slot.smpl.reset(common_sampler_init(model, task.params.sampling)); - slot.smpl = common_sampler_init(model, task.params.sampling); if (slot.smpl == nullptr) { // for now, the only error that may happen here is invalid grammar send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST); return false; } - SLT_INF(slot, "sampler chain: %s\n", common_sampler_print(slot.smpl).c_str()); + SLT_INF(slot, "sampler chain: %s\n", common_sampler_print(slot.smpl.get()).c_str()); } // initialize draft batch @@ -1216,11 +1210,10 @@ struct server_context_impl { } void populate_token_probs(const server_slot & slot, completion_token_output & result, bool post_sampling, bool special, int idx) const { - size_t n_probs = slot.task->params.sampling.n_probs; - size_t n_vocab = llama_vocab_n_tokens(vocab); + const size_t n_probs = slot.task->params.sampling.n_probs; if (post_sampling) { - const auto * cur_p = common_sampler_get_candidates(slot.smpl, true); + const auto * cur_p = common_sampler_get_candidates(slot.smpl.get(), true); const size_t max_probs = cur_p->size; // set probability for sampled token @@ -1245,7 +1238,7 @@ struct server_context_impl { std::vector cur = get_token_probabilities(ctx, idx); // set probability for sampled token - for (size_t i = 0; i < n_vocab; i++) { + for (size_t i = 0; i < cur.size(); i++) { // set probability for sampled token if (cur[i].id == result.tok) { result.prob = cur[i].p; @@ -1255,7 +1248,7 @@ 
struct server_context_impl { // set probability for top n_probs tokens result.probs.reserve(n_probs); - for (size_t i = 0; i < std::min(n_vocab, n_probs); i++) { + for (size_t i = 0; i < std::min(cur.size(), n_probs); i++) { result.probs.push_back({ cur[i].id, common_token_to_piece(ctx, cur[i].id, special), @@ -2301,13 +2294,13 @@ struct server_context_impl { GGML_ASSERT(batch.n_tokens > 0); - common_sampler_reset(slot.smpl); + common_sampler_reset(slot.smpl.get()); // Process all prompt tokens through sampler system for (int i = 0; i < slot.task->n_tokens(); ++i) { llama_token id = input_tokens[i]; if (id != LLAMA_TOKEN_NULL) { - common_sampler_accept(slot.smpl, id, false); + common_sampler_accept(slot.smpl.get(), id, false); } } @@ -2525,11 +2518,11 @@ struct server_context_impl { const int tok_idx = slot.i_batch - i; - llama_token id = common_sampler_sample(slot.smpl, ctx, tok_idx); + llama_token id = common_sampler_sample(slot.smpl.get(), ctx, tok_idx); slot.i_batch = -1; - common_sampler_accept(slot.smpl, id, true); + common_sampler_accept(slot.smpl.get(), id, true); slot.n_decoded += 1; @@ -2570,7 +2563,7 @@ struct server_context_impl { size_t n_draft = slot.drafted.size(); // the accepted tokens from the speculation - const auto ids = common_sampler_sample_and_accept_n(slot.smpl, ctx, slot.i_batch_dft, slot.drafted); + const auto ids = common_sampler_sample_and_accept_n(slot.smpl.get(), ctx, slot.i_batch_dft, slot.drafted); slot.i_batch_dft.clear(); slot.drafted.clear(); diff --git a/tools/server/tests/unit/test_compat_anthropic.py b/tools/server/tests/unit/test_compat_anthropic.py index d55dd1d945..e0a003557e 100644 --- a/tools/server/tests/unit/test_compat_anthropic.py +++ b/tools/server/tests/unit/test_compat_anthropic.py @@ -684,7 +684,7 @@ def test_anthropic_streaming_content_block_indices(): # Request that might produce both text and tool use res = server.make_stream_request("POST", "/v1/messages", data={ "model": "test", - "max_tokens": 200, + "max_tokens": 400, "stream": True, "tools": [{ "name": "test_tool", diff --git a/tools/tts/tts.cpp b/tools/tts/tts.cpp index eaf56591d9..8c39fce8ba 100644 --- a/tools/tts/tts.cpp +++ b/tools/tts/tts.cpp @@ -568,10 +568,10 @@ int main(int argc, char ** argv) { llama_context * ctx_ttc = NULL; llama_context * ctx_cts = NULL; - common_init_result llama_init_ttc = common_init_from_params(params); + auto llama_init_ttc = common_init_from_params(params); - model_ttc = llama_init_ttc.model.get(); - ctx_ttc = llama_init_ttc.context.get(); + model_ttc = llama_init_ttc->model(); + ctx_ttc = llama_init_ttc->context(); if (model_ttc == nullptr || ctx_ttc == nullptr) { return ENOENT; @@ -583,10 +583,10 @@ int main(int argc, char ** argv) { params.embedding = true; params.n_ubatch = params.n_batch; - common_init_result llama_init_cts = common_init_from_params(params); + auto llama_init_cts = common_init_from_params(params); - model_cts = llama_init_cts.model.get(); - ctx_cts = llama_init_cts.context.get(); + model_cts = llama_init_cts->model(); + ctx_cts = llama_init_cts->context(); if (model_cts == nullptr || ctx_cts == nullptr) { return ENOENT; From 0759b09c90f9e1bb8beebe74882b9f094b91f7bb Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Sun, 14 Dec 2025 13:05:59 +0100 Subject: [PATCH 16/23] graph: add f_attn_temp_offset (#18025) --- src/llama-graph.cpp | 4 ++-- src/llama-graph.h | 5 +++-- src/llama-hparams.h | 1 + src/llama-model.cpp | 5 +++++ 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/llama-graph.cpp 
b/src/llama-graph.cpp index 6cf9a883a6..8909bbfb95 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -78,7 +78,7 @@ void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) { for (int i = 0; i < n_tokens; ++i) { const float pos = ubatch->pos[i]; attn_scale_data[i] = std::log( - std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0 + std::floor((pos + f_attn_temp_offset) / n_attn_temp_floor_scale) + 1.0 ) * f_attn_temp_scale + 1.0; } @@ -1203,7 +1203,7 @@ ggml_tensor * llm_graph_context::build_inp_pos() const { } ggml_tensor * llm_graph_context::build_inp_attn_scale() const { - auto inp = std::make_unique(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale); + auto inp = std::make_unique(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale, hparams.f_attn_temp_offset); auto & cur = inp->attn_scale; diff --git a/src/llama-graph.h b/src/llama-graph.h index d0c3934f67..e9d387bd7c 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -132,8 +132,8 @@ public: // temperature tuning, used by llama4 class llm_graph_input_attn_temp : public llm_graph_input_i { public: - llm_graph_input_attn_temp(uint32_t n_attn_temp_floor_scale, float f_attn_temp_scale) - : n_attn_temp_floor_scale(n_attn_temp_floor_scale), f_attn_temp_scale(f_attn_temp_scale) {} + llm_graph_input_attn_temp(uint32_t n_attn_temp_floor_scale, float f_attn_temp_scale, float f_attn_temp_offset) + : n_attn_temp_floor_scale(n_attn_temp_floor_scale), f_attn_temp_scale(f_attn_temp_scale), f_attn_temp_offset(f_attn_temp_offset) {} virtual ~llm_graph_input_attn_temp() = default; void set_input(const llama_ubatch * ubatch) override; @@ -142,6 +142,7 @@ public: const uint32_t n_attn_temp_floor_scale; const float f_attn_temp_scale; + const float f_attn_temp_offset; }; class llm_graph_input_pos_bucket : public llm_graph_input_i { diff --git a/src/llama-hparams.h b/src/llama-hparams.h index aab319754e..a467c64a14 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -165,6 +165,7 @@ struct llama_hparams { uint32_t n_no_rope_layer_step = 4; uint32_t n_attn_temp_floor_scale = 0; float f_attn_temp_scale = 0.0f; + float f_attn_temp_offset = 0.0f; // offset position index // gemma3n altup uint32_t n_altup = 4; // altup_num_inputs diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 5da1dd6dbb..28f06b4e61 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -668,6 +668,7 @@ void llama_model::load_hparams(llama_model_loader & ml) { hparams.n_swa = 8192; hparams.n_attn_temp_floor_scale = 8192; hparams.f_attn_temp_scale = 0.1f; + hparams.f_attn_temp_offset = 1.0f; hparams.set_swa_pattern(4); // pattern: 3 chunked - 1 full } @@ -1646,6 +1647,8 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE, hparams.f_attn_temp_scale, false); ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.n_attn_temp_floor_scale, false); + hparams.f_attn_temp_offset = 0.0f; + switch (hparams.n_layer) { case 27: type = LLM_TYPE_16B; break; case 60: type = LLM_TYPE_236B; break; @@ -2276,6 +2279,8 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_SLOW, hparams.yarn_beta_slow, false); ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, 0.0f); + hparams.f_attn_temp_offset = 0.0f; + // TODO: maybe add n_attn_temp_floor_scale as a separate KV? 
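
Pulled out of the graph code, the scale written by llm_graph_input_attn_temp::set_input is a one-liner; the standalone sketch below (parameter values taken from the llama4 branch of load_hparams) shows that f_attn_temp_offset = 1.0f reproduces the previous hard-coded behavior, while models that set it to 0.0f shift where the floor increments:

    #include <cmath>
    #include <cstdio>

    // attn_scale as computed per token position in set_input
    static float attn_temp_scale(float pos, float offset, float floor_scale, float scale) {
        return std::log(std::floor((pos + offset) / floor_scale) + 1.0f) * scale + 1.0f;
    }

    int main() {
        // llama4-style hparams: n_attn_temp_floor_scale 8192, f_attn_temp_scale 0.1, offset 1.0
        std::printf("%f\n", attn_temp_scale(    0.0f, 1.0f, 8192.0f, 0.1f)); // 1.000000
        std::printf("%f\n", attn_temp_scale( 8191.0f, 1.0f, 8192.0f, 0.1f)); // ~1.069, floor reaches 1
        std::printf("%f\n", attn_temp_scale(16383.0f, 1.0f, 8192.0f, 0.1f)); // ~1.110, floor reaches 2
        return 0;
    }
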
if (hparams.f_attn_temp_scale != 0.0f) { hparams.n_attn_temp_floor_scale = hparams.n_ctx_orig_yarn; From 9e6649ecf244a99749dacc28fc4f49f7d6ad6f60 Mon Sep 17 00:00:00 2001 From: Ruben Ortlam Date: Sun, 14 Dec 2025 14:52:46 +0100 Subject: [PATCH 17/23] vulkan: fix mul_mat_vec_iq1_s formatting (#18026) --- .../vulkan-shaders/mul_mat_vec_iq1_s.comp | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp index e6b1f20215..c5f5e9cbb2 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp @@ -10,44 +10,44 @@ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) { const uint y_idx_base = i * QUANT_K + 32 * ib32; - [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { const uint base_b_idx = (j * p.batch_stride_b + b_offset + y_idx_base) / 4; - [[unroll]] for (uint l = 0; l < 4; ++l) { + [[unroll]] for (uint l = 0; l < 4; ++l) { const vec4 b_val_0 = vec4(data_b_v4[base_b_idx + 2 * l]); const vec4 b_val_1 = vec4(data_b_v4[base_b_idx + 2 * l + 1]); // index for data_a uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { + [[unroll]] for (uint n = 0; n < num_rows; ++n) { const float d = float(data_a[ibi].d); const uint qh = data_a[ibi].qh[ib32]; const float dl = d * float(2 * bitfieldExtract(qh, 12, 3) + 1); const uint qs = data_a[ibi].qs[4 * ib32 + l]; - const uint idxhi = bitfieldExtract(qh, 3 * int(l), 3); + const uint idxhi = bitfieldExtract(qh, 3 * int(l), 3); const uint16_t grid = uint16_t(iq1s_grid[qs | (idxhi << 8)]); const float delta_val = ((qh & 0x8000) != 0) ? 
-IQ1S_DELTA : IQ1S_DELTA; - const vec4 delta_v = vec4(delta_val); + const vec4 delta_v = vec4(delta_val); const vec4 fbits0 = vec4( float(bitfieldExtract(grid, 0, 2)), float(bitfieldExtract(grid, 2, 2)), float(bitfieldExtract(grid, 4, 2)), float(bitfieldExtract(grid, 6, 2)) - ); + ); const vec4 fbits1 = vec4( float(bitfieldExtract(grid, 8, 2)), float(bitfieldExtract(grid, 10, 2)), float(bitfieldExtract(grid, 12, 2)), float(bitfieldExtract(grid, 14, 2)) ); - + vec4 sum_v = fma(b_val_0, fbits0 + delta_v, vec4(0.0)); sum_v = fma(b_val_1, fbits1 + delta_v, sum_v); - FLOAT_TYPE sum = dot(sum_v, vec4(1.0)); - - temp[j][n] = fma(dl, sum, temp[j][n]); + FLOAT_TYPE sum = dot(sum_v, vec4(1.0)); + + temp[j][n] = fma(dl, sum, temp[j][n]); ibi += num_blocks_per_row; } } From 37f5a1093bf7f6c58630d916871c54e05a8364d7 Mon Sep 17 00:00:00 2001 From: Haowei Wu Date: Sun, 14 Dec 2025 22:57:52 +0800 Subject: [PATCH 18/23] mtmd: enhance image resizing in llava_uhd (#18014) --- tools/mtmd/clip.cpp | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index bb922e30b4..5cc4e4b1fa 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -2230,7 +2230,14 @@ struct llava_uhd { clip_image_size refined_size; // size of image right before slicing (must be multiple of slice size) clip_image_size grid_size; // grid_size.width * grid_size.height = number of slices std::vector slices; + + img_tool::resize_algo interpolation_overview = img_tool::RESIZE_ALGO_BILINEAR; + bool padding_overview = false; // if true, refine image will be padded to the grid size (e.g. llava-1.6) + std::array pad_color_overview = {0, 0, 0}; + + img_tool::resize_algo interpolation_refined = img_tool::RESIZE_ALGO_BICUBIC; bool padding_refined = false; // if true, refine image will be padded to the grid size (e.g. 
llava-1.6) + std::array pad_color_refined = {0, 0, 0}; }; static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) { @@ -2257,10 +2264,11 @@ struct llava_uhd { auto refine_size = llava_uhd::select_best_resolution( original_size, ctx->model.hparams.image_res_candidates); - res.overview_size = clip_image_size{slice_size, slice_size}; - res.refined_size = refine_size; - res.grid_size = clip_image_size{0, 0}; - res.padding_refined = true; + res.overview_size = clip_image_size{slice_size, slice_size}; + res.refined_size = refine_size; + res.grid_size = clip_image_size{0, 0}; + res.padding_refined = true; + res.interpolation_refined = img_tool::RESIZE_ALGO_BILINEAR; // preserve old behavior when padding LOG_DBG("%s: using pinpoints for slicing\n", __func__); LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n", @@ -2339,12 +2347,13 @@ struct llava_uhd { static std::vector slice_image(const clip_image_u8 * img, const slice_instructions & inst) { std::vector output; - img_tool::resize_algo interpolation = img_tool::RESIZE_ALGO_BILINEAR; // TODO: make it configurable // resize to overview size clip_image_u8_ptr resized_img(clip_image_u8_init()); - img_tool::resize(*img, *resized_img, inst.overview_size, interpolation); + img_tool::resize(*img, *resized_img, inst.overview_size, inst.interpolation_overview, + inst.padding_overview, inst.pad_color_overview); output.push_back(std::move(resized_img)); + if (inst.slices.empty()) { // no slices, just return the resized image return output; @@ -2352,13 +2361,8 @@ struct llava_uhd { // resize to refined size clip_image_u8_ptr refined_img(clip_image_u8_init()); - if (inst.padding_refined) { - img_tool::resize(*img, *refined_img, inst.refined_size, interpolation); - } else { - // only algo bicubic preserves the ratio; old models rely on this behavior - // TODO: do we need to support other algos here? 
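
Since interpolation, padding and pad color are now carried per stage, a resize policy reduces to plain data. A self-contained analog (the enum and field names mirror clip.cpp but are re-declared here for illustration; the concrete values are made up):

    #include <array>
    #include <cstdint>

    enum resize_algo { RESIZE_ALGO_BILINEAR, RESIZE_ALGO_BICUBIC };

    // per-stage policy, as in slice_instructions above: the overview and the
    // refined image each pick their own algorithm, padding flag and pad color
    struct resize_policy {
        resize_algo                 algo;
        bool                        pad;
        std::array<std::uint8_t, 3> pad_color;
    };

    int main() {
        const resize_policy overview = { RESIZE_ALGO_BILINEAR, false, {0, 0, 0} };
        const resize_policy refined  = { RESIZE_ALGO_BICUBIC,  false, {0, 0, 0} }; // ratio-preserving default
        (void) overview;
        (void) refined;
        return 0;
    }
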
- img_tool::resize(*img, *refined_img, inst.refined_size, img_tool::RESIZE_ALGO_BICUBIC, false); - } + img_tool::resize(*img, *refined_img, inst.refined_size, inst.interpolation_refined, + inst.padding_refined, inst.pad_color_refined); // create slices for (const auto & slice : inst.slices) { From 5c8a717128cc98aa9e5b1c44652f5cf458fd426e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sun, 14 Dec 2025 16:04:37 +0100 Subject: [PATCH 19/23] convert : refactor rope scaling handling (#18013) * refactor rope scaling handling * ws-- * missed a couple * use find_hparam --- convert_hf_to_gguf.py | 357 +++++++++++------------------------------- 1 file changed, 88 insertions(+), 269 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 151608d56b..3f861f2a6a 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -136,11 +136,19 @@ class ModelBase: self.remote_hf_model_id = remote_hf_model_id self.sentence_transformers_dense_modules = sentence_transformers_dense_modules self.hparams = ModelBase.load_hparams(self.dir_model, self.is_mistral_format) if hparams is None else hparams + self.rope_parameters = self.hparams.get("rope_parameters", self.hparams.get("rope_scaling")) or {} self.model_tensors = self.index_tensors(remote_hf_model_id=remote_hf_model_id) self.metadata_override = metadata_override self.model_name = model_name self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py + # Ensure "rope_theta" and "rope_type" is mirrored in rope_parameters + if "full_attention" not in self.rope_parameters and "sliding_attention" not in self.rope_parameters: + if "rope_theta" not in self.rope_parameters and (rope_theta := self.find_hparam(["rope_theta", "global_rope_theta", "rotary_emb_base"], optional=True)) is not None: + self.rope_parameters["rope_theta"] = rope_theta + if "rope_type" not in self.rope_parameters and (rope_type := self.rope_parameters.get("type")) is not None: + self.rope_parameters["rope_type"] = rope_type + # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type if self.ftype == gguf.LlamaFileType.GUESSED: # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. 
@@ -795,7 +803,7 @@ class TextModel(ModelBase): def set_gguf_parameters(self): self.gguf_writer.add_block_count(self.block_count) - if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length"], optional=True)) is not None: + if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length", "max_sequence_length", "model_max_length"], optional=True)) is not None: self.gguf_writer.add_context_length(n_ctx) logger.info(f"gguf: context length = {n_ctx}") @@ -815,7 +823,42 @@ class TextModel(ModelBase): self.gguf_writer.add_head_count_kv(n_head_kv) logger.info(f"gguf: key-value head count = {n_head_kv}") - if (rope_theta := self.hparams.get("rope_theta")) is not None: + rope_params = self.rope_parameters.get("full_attention", self.rope_parameters) + if (rope_type := rope_params.get("rope_type")) is not None: + rope_factor = rope_params.get("factor") + rope_gguf_type = gguf.RopeScalingType.NONE + if rope_type == "linear" and rope_factor is not None: + rope_gguf_type = gguf.RopeScalingType.LINEAR + self.gguf_writer.add_rope_scaling_type(rope_gguf_type) + self.gguf_writer.add_rope_scaling_factor(rope_factor) + elif rope_type == "yarn" and rope_factor is not None: + rope_gguf_type = gguf.RopeScalingType.YARN + self.gguf_writer.add_rope_scaling_type(rope_gguf_type) + self.gguf_writer.add_rope_scaling_factor(rope_factor) + self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_params["original_max_position_embeddings"]) + if (yarn_ext_factor := rope_params.get("extrapolation_factor")) is not None: + self.gguf_writer.add_rope_scaling_yarn_ext_factor(yarn_ext_factor) + if (yarn_attn_factor := rope_params.get("attention_factor", rope_params.get("attn_factor"))) is not None: + self.gguf_writer.add_rope_scaling_yarn_attn_factor(yarn_attn_factor) + if (yarn_beta_fast := rope_params.get("beta_fast")) is not None: + self.gguf_writer.add_rope_scaling_yarn_beta_fast(yarn_beta_fast) + if (yarn_beta_slow := rope_params.get("beta_slow")) is not None: + self.gguf_writer.add_rope_scaling_yarn_beta_slow(yarn_beta_slow) + # self.gguf_writer.add_rope_scaling_yarn_log_mul(rope_params["mscale_all_dim"]) + elif rope_type == "su" or rope_type == "longrope": + rope_gguf_type = gguf.RopeScalingType.LONGROPE + self.gguf_writer.add_rope_scaling_type(rope_gguf_type) + elif rope_type == "dynamic": + # HunYuan, handled in model class + pass + elif rope_type.lower() == "llama3": + # Handled in generate_extra_tensors + pass + else: + logger.warning(f"Unknown RoPE type: {rope_type}") + logger.info(f"gguf: rope scaling type = {rope_gguf_type.name}") + + if (rope_theta := rope_params.get("rope_theta")) is not None: self.gguf_writer.add_rope_freq_base(rope_theta) logger.info(f"gguf: rope theta = {rope_theta}") if (f_rms_eps := self.find_hparam(["rms_norm_eps", "norm_eps"], optional=True)) is not None: @@ -1966,34 +2009,10 @@ class BaichuanModel(TextModel): self._set_vocab_sentencepiece() def set_gguf_parameters(self): - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") + super().set_gguf_parameters() 
self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: head_count = self.hparams["num_attention_heads"] @@ -2089,34 +2108,10 @@ class XverseModel(TextModel): special_vocab.add_to_gguf(self.gguf_writer) def set_gguf_parameters(self): - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") + super().set_gguf_parameters() self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused @@ -2430,11 +2425,6 @@ class LlamaModel(TextModel): rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - @staticmethod def permute(weights: Tensor, n_head: int, n_head_kv: int | None): if n_head_kv is not None and n_head != n_head_kv: @@ -2518,16 +2508,16 @@ class LlamaModel(TextModel): return [(self.map_tensor_name(name), data_torch)] def generate_extra_tensors(self) -> 
Iterable[tuple[str, Tensor]]: - if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): - if rope_scaling.get("rope_type", '').lower() == "llama3": - base = self.hparams.get("rope_theta", 10000.0) + if rope_params := self.rope_parameters.get("full_attention", self.rope_parameters): + if rope_params.get("rope_type", '').lower() == "llama3": + base = rope_params.get("rope_theta", 10000.0) if (dim := self.hparams.get("head_dim")) is None: dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) - factor = rope_scaling.get("factor", 8.0) - low_freq_factor = rope_scaling.get("low_freq_factor", 1.0) - high_freq_factor = rope_scaling.get("high_freq_factor", 4.0) + factor = rope_params.get("factor", 8.0) + low_freq_factor = rope_params.get("low_freq_factor", 1.0) + high_freq_factor = rope_params.get("high_freq_factor", 4.0) old_context_len = self.hparams.get("original_max_position_embeddings", 8192) low_freq_wavelen = old_context_len / low_freq_factor @@ -2564,11 +2554,6 @@ class ArceeModel(LlamaModel): def set_gguf_parameters(self): super().set_gguf_parameters() self._try_set_pooling_type() - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) @ModelBase.register("AfmoeForCausalLM") @@ -2851,17 +2836,11 @@ class Mistral3Model(LlamaModel): def set_gguf_parameters(self): super().set_gguf_parameters() - rope_params = self.hparams.get("rope_parameters") + rope_params = self.rope_parameters if self.hparams.get("model_type") == "ministral3": - assert rope_params is not None, "ministral3 must have 'rope_parameters' config" + assert rope_params, "ministral3 must have 'rope_parameters' config" assert rope_params["rope_type"] == "yarn", "ministral3 rope_type must be 'yarn'" - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_params["factor"]) - self.gguf_writer.add_rope_scaling_yarn_beta_fast(rope_params["beta_fast"]) - self.gguf_writer.add_rope_scaling_yarn_beta_slow(rope_params["beta_slow"]) self.gguf_writer.add_rope_scaling_yarn_log_mul(rope_params["mscale_all_dim"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_params["original_max_position_embeddings"]) - self.gguf_writer.add_rope_freq_base(rope_params["rope_theta"]) self.gguf_writer.add_attn_temperature_scale(rope_params["llama_4_scaling_beta"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None): @@ -2958,7 +2937,7 @@ class DeciModel(TextModel): assert self.block_count == len(self._num_kv_heads) assert self.block_count == len(self._num_heads) assert self.block_count == len(self._ffn_dims) - if (rope_theta := self.hparams.get("rope_theta")) is not None: + if (rope_theta := self.rope_parameters.get("rope_theta")) is not None: self.gguf_writer.add_rope_freq_base(rope_theta) self.gguf_writer.add_head_count_kv(self._num_kv_heads) self.gguf_writer.add_head_count(self._num_heads) @@ -2983,11 +2962,6 @@ class DeciModel(TextModel): rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) - rope_scaling = self.hparams.get("rope_scaling") or {} - if 
rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - @staticmethod def permute(weights: Tensor, n_head: int, n_head_kv: int | None): if n_head_kv is not None and n_head != n_head_kv: @@ -3016,16 +2990,16 @@ class DeciModel(TextModel): return [(self.map_tensor_name(name), data_torch)] def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: - if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): - if rope_scaling.get("rope_type", '').lower() == "llama3": - base = self.hparams.get("rope_theta", 10000.0) + if rope_params := self.rope_parameters.get("full_attention", self.rope_parameters): + if rope_params.get("rope_type", '').lower() == "llama3": + base = rope_params.get("rope_theta", 10000.0) if (dim := self.hparams.get("head_dim")) is None: dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) - factor = rope_scaling.get("factor", 8.0) - low_freq_factor = rope_scaling.get("low_freq_factor", 1.0) - high_freq_factor = rope_scaling.get("high_freq_factor", 4.0) + factor = rope_params.get("factor", 8.0) + low_freq_factor = rope_params.get("low_freq_factor", 1.0) + high_freq_factor = rope_params.get("high_freq_factor", 4.0) old_context_len = self.hparams.get("original_max_position_embeddings", 8192) low_freq_wavelen = old_context_len / low_freq_factor @@ -3279,10 +3253,6 @@ class MiniCPMModel(TextModel): logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"] self.gguf_writer.add_logit_scale(logit_scale) logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}") - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "longrope": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE) - logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}") def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] @@ -3402,17 +3372,6 @@ class QwenModel(TextModel): def set_vocab(self): self._set_vocab_qwen() - def set_gguf_parameters(self): - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - @ModelBase.register("Qwen2Model", "Qwen2ForCausalLM", "Qwen2AudioForConditionalGeneration") class Qwen2Model(TextModel): @@ -3427,11 +3386,6 @@ class Qwen2Model(TextModel): def set_gguf_parameters(self): super().set_gguf_parameters() self._try_set_pooling_type() - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - 
self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: if self.hf_arch == "Qwen2Model": @@ -3499,12 +3453,6 @@ class DreamModel(TextModel): # Dream models use non-causal attention for diffusion self.gguf_writer.add_causal_attention(False) - # Handle RoPE scaling similar to Qwen2 - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) # Add Dream-specific parameters mask_token_id = self.hparams.get("mask_token_id") @@ -4048,13 +3996,6 @@ class Qwen2MoeModel(TextModel): if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None: self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size) logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}") - # YaRN is not enabled by default - # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) _experts: list[dict[str, Tensor]] | None = None @@ -4656,7 +4597,7 @@ class Phi3MiniModel(TextModel): self.gguf_writer.add_head_count_kv(n_head_kv) self.gguf_writer.add_layer_norm_rms_eps(rms_eps) self.gguf_writer.add_rope_dimension_count(rope_dims) - self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"])) + self.gguf_writer.add_rope_freq_base(self.rope_parameters.get("full_attention", self.rope_parameters)["rope_theta"]) self.gguf_writer.add_file_type(self.ftype) sliding_window = self.hparams.get("sliding_window") # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models @@ -4932,7 +4873,7 @@ class Plamo2Model(TextModel): self.gguf_writer.add_value_length(hparams.get("hidden_size_per_head", 128)) self.gguf_writer.add_block_count(self.block_count) self.gguf_writer.add_layer_norm_rms_eps(hparams.get("rms_norm_eps", 1e-06)) - self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 10000)) + self.gguf_writer.add_rope_freq_base(self.rope_parameters.get("rope_theta", 10000)) # Mamba parameters self.gguf_writer.add_ssm_state_size(hparams.get("mamba_d_state", 64)) @@ -5130,21 +5071,6 @@ class InternLM2Model(TextModel): special_vocab.add_to_gguf(self.gguf_writer) - def set_gguf_parameters(self): - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - 
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - self.gguf_writer.add_file_type(self.ftype) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: num_heads = self.hparams["num_attention_heads"] num_kv_heads = self.hparams["num_key_value_heads"] @@ -5221,11 +5147,6 @@ class InternLM3Model(TextModel): rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") @@ -5588,7 +5509,6 @@ class NomicBertModel(BertModel): def set_gguf_parameters(self): super().set_gguf_parameters() - self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"]) if self.is_moe: self.gguf_writer.add_moe_every_n_layers(self.hparams["moe_every_n_layers"]) self.gguf_writer.add_expert_count(self.hparams["num_experts"]) @@ -5711,8 +5631,6 @@ class XLMRobertaModel(BertModel): super().set_gguf_parameters() # jina-embeddings-v3 - if rotary_emb_base := self.hparams.get("rotary_emb_base"): - self.gguf_writer.add_rope_freq_base(rotary_emb_base) lora_alpha = self.hparams.get("lora_alpha") if lora_prompt_prefixes := self.hparams.get("task_instructions"): assert self._lora_files and all(lora_name in lora_prompt_prefixes for lora_name in self._lora_files.keys()) @@ -5840,19 +5758,16 @@ class Gemma3Model(TextModel): self._set_vocab_gpt2() def set_gguf_parameters(self): + super().set_gguf_parameters() hparams = self.hparams # some default values are not specified in the hparams self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072)) - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8)) self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6)) self.gguf_writer.add_key_length(hparams.get("head_dim", 256)) self.gguf_writer.add_value_length(hparams.get("head_dim", 256)) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0)) # for global layers + self.gguf_writer.add_rope_freq_base(self.rope_parameters.get("full_attention", self.rope_parameters).get("rope_theta", 1_000_000.0)) # for global layers # attn_logit_softcapping is removed in Gemma3 assert hparams.get("attn_logit_softcapping") is None if (final_logit_softcap := hparams.get("final_logit_softcapping")): @@ -5860,19 +5775,6 @@ class Gemma3Model(TextModel): if hparams.get("sliding_window_pattern") != 1: self.gguf_writer.add_sliding_window(hparams["sliding_window"]) 
self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4)) - if hparams.get("rope_scaling") is not None: - rope_scaling = hparams["rope_scaling"] - if rope_scaling["rope_type"] == "linear": - # important: this rope_scaling is only applied for global layers, and not used by 1B model - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - elif rope_scaling["rope_type"] == "yarn": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) - self.gguf_writer.add_rope_scaling_yarn_ext_factor(rope_scaling["extrapolation_factor"]) - self.gguf_writer.add_rope_scaling_yarn_beta_fast(rope_scaling["beta_fast"]) - self.gguf_writer.add_rope_scaling_yarn_beta_slow(rope_scaling["beta_slow"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused @@ -6776,13 +6678,6 @@ class Olmo2Model(TextModel): def set_gguf_parameters(self): super().set_gguf_parameters() - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_attn_factors(rope_scaling["attention_factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) - if "sliding_window" in self.hparams: self.gguf_writer.add_sliding_window(self.hparams["sliding_window"]) @@ -7281,16 +7176,11 @@ class DeepseekV2Model(TextModel): self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"]) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) - + if (rope_mscale_all := self.rope_parameters.get("mscale_all_dim")) is not None: # [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX] # note: for legacy reasons, this is not consistent with the other usages of self.gguf_writer.add_rope_scaling_yarn_log_mul # ref https://github.com/ggml-org/llama.cpp/pull/17945 - self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * rope_scaling["mscale_all_dim"]) + self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * rope_mscale_all) _experts: list[dict[str, Tensor]] | None = None @@ -7898,11 +7788,6 @@ class Glm4Model(TextModel): if (rope_dim := self.hparams.get("head_dim")) is None: rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5))) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) 
-> Iterable[tuple[str, Tensor]]: if name.startswith("model.visual."): # ignore visual part of Glm4v @@ -8240,50 +8125,26 @@ class ExaoneModel(TextModel): model_arch = gguf.MODEL_ARCH.EXAONE def set_gguf_parameters(self): + super().set_gguf_parameters() hparams = self.hparams assert (hparams["activation_function"] == "silu") - max_position_embeddings = hparams["max_position_embeddings"] - embed_dim = hparams["hidden_size"] - num_heads = hparams["num_attention_heads"] - num_kv_heads = hparams.get("num_key_value_heads", num_heads) - layer_norm_eps = hparams["layer_norm_epsilon"] - intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim - # ignore for now as EXAONE-3.0-7.8B-Instruct attentino_dropout is 0.0 - # attention_dropout_rate = hparams["attention_dropout"] - # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0 - # embed_dropout_rate = hparams["embed_dropout"] - self.gguf_writer.add_embedding_length(embed_dim) - self.gguf_writer.add_head_count(num_heads) - self.gguf_writer.add_head_count_kv(num_kv_heads) - self.gguf_writer.add_context_length(max_position_embeddings) - self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps) - self.gguf_writer.add_feed_forward_length(intermediate_size) - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_file_type(self.ftype) - - if (rope_theta := self.hparams.get("rope_theta")) is not None: - self.gguf_writer.add_rope_freq_base(rope_theta) rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True) rotary_factor = rotary_factor if rotary_factor is not None else 1.0 self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"]))) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: - if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): - if rope_scaling.get("rope_type", '').lower() == "llama3": - base = self.hparams.get("rope_theta", 10000.0) + if rope_params := self.rope_parameters.get("full_attention", self.rope_parameters): + if rope_params.get("rope_type", '').lower() == "llama3": + base = self.rope_parameters.get("rope_theta", 10000.0) if (dim := self.hparams.get("head_dim")) is None: dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) - factor = rope_scaling.get("factor", 8.0) - low_freq_factor = rope_scaling.get("low_freq_factor", 1.0) - high_freq_factor = rope_scaling.get("high_freq_factor", 4.0) + factor = rope_params.get("factor", 8.0) + low_freq_factor = rope_params.get("low_freq_factor", 1.0) + high_freq_factor = rope_params.get("high_freq_factor", 4.0) old_context_len = self.hparams.get("original_max_position_embeddings", 8192) low_freq_wavelen = old_context_len / low_freq_factor @@ -8338,22 +8199,17 @@ class Exaone4Model(TextModel): if len(sliding_window_pattern) == hparams["num_hidden_layers"]: self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling: - 
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: - if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): - if rope_scaling.get("rope_type", '').lower() == "llama3": - base = self.hparams.get("rope_theta", 10_000.0) + if rope_params := self.rope_parameters.get("full_attention", self.rope_parameters): + if rope_params.get("rope_type", '').lower() == "llama3": + base = rope_params.get("rope_theta", 10_000.0) if (dim := self.hparams.get("head_dim")) is None: dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) - factor = rope_scaling.get("factor", 16.0) - low_freq_factor = rope_scaling.get("low_freq_factor", 1.0) - high_freq_factor = rope_scaling.get("high_freq_factor", 4.0) + factor = rope_params.get("factor", 16.0) + low_freq_factor = rope_params.get("low_freq_factor", 1.0) + high_freq_factor = rope_params.get("high_freq_factor", 4.0) old_context_len = self.hparams.get("original_max_position_embeddings", 8192) low_freq_wavelen = old_context_len / low_freq_factor @@ -8664,13 +8520,6 @@ class BailingMoeModel(TextModel): rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) - else: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"]) self.gguf_writer.add_vocab_size(hparams["vocab_size"]) self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"]) @@ -8777,13 +8626,6 @@ class BailingMoeV2Model(TextModel): rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5))) - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) - else: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"]) self.gguf_writer.add_vocab_size(hparams["vocab_size"]) self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"]) @@ -8862,13 +8704,6 @@ class GroveMoeModel(TextModel): self.gguf_writer.add_experts_per_group(2) # FIXME?: Hardcoded https://huggingface.co/inclusionAI/GroveMoE-Inst/blob/c4c69e5970d18907b5e6ddccdfd55176fe292df1/modeling_grove_moe.py#L376 self.gguf_writer.add_expert_group_scale(0.05) - # YaRN is not enabled by default - # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts - rope_scaling = self.hparams.get("rope_scaling") or {} - if rope_scaling.get("rope_type", 
rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) _experts: list[dict[str, Tensor]] | None = None _chunk_experts: list[dict[str, Tensor]] | None = None @@ -9178,7 +9013,7 @@ class FalconH1Model(Mamba2Model): assert self.d_inner % self.d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {self.d_head}" # Add any other Falcon Mamba2 specific configuration - self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"])) + self.gguf_writer.add_rope_freq_base(self.rope_parameters["rope_theta"]) @ModelBase.register("HunYuanMoEV1ForCausalLM") @@ -9256,12 +9091,11 @@ class HunYuanMoEModel(TextModel): self.gguf_writer.add_expert_shared_count(moe_shared_expert[0]) # Rope - rope_scaling = hparams.get("rope_scaling", {}) - if rope_scaling.get("type") == "dynamic": + if self.rope_parameters.get("rope_type") == "dynamic": # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf) - alpha = rope_scaling.get("alpha", 1000) - base = hparams.get("rope_theta", 10000.0) + alpha = self.rope_parameters.get("alpha", 1000) + base = self.rope_parameters.get("rope_theta", 10000.0) dim = (hparams["hidden_size"] // hparams["num_attention_heads"]) # 128 scaled_base = base * (alpha ** (dim / (dim - 2))) # 10000 * (1000 ** (128 / 126)) = 11158839.9251 self.gguf_writer.add_rope_freq_base(scaled_base) @@ -9456,12 +9290,11 @@ class HunYuanModel(TextModel): hparams = self.hparams # Rope - rope_scaling = hparams.get("rope_scaling", {}) - if rope_scaling.get("type") == "dynamic": + if self.rope_parameters.get("rope_type") == "dynamic": # HunYuan uses NTK Aware Alpha based scaling. 
Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
             # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
-            alpha = rope_scaling.get("alpha", 50)
-            base = hparams.get("rope_theta", 10000.0)
+            alpha = self.rope_parameters.get("alpha", 50)
+            base = self.rope_parameters.get("rope_theta", 10000.0)
             dim = hparams["head_dim"]
             scaled_base = base * (alpha ** (dim / (dim - 2)))
             self.gguf_writer.add_rope_freq_base(scaled_base)
@@ -9612,13 +9445,6 @@ class GptOssModel(TextModel):
         self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
         self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size"])
 
-        rope_scaling = self.hparams.get("rope_scaling") or {}
-        rope_type = rope_scaling.get("rope_type", rope_scaling.get("type"))
-        assert rope_type == "yarn", f"GPT-OSS only supports yarn rope scaling, got {rope_type}"
-        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
-        self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
-        self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling.get("original_max_position_embeddings", 4096))
-
 
 @ModelBase.register("Lfm2ForCausalLM", "LFM2ForCausalLM")
 class LFM2Model(TextModel):
@@ -9791,13 +9617,6 @@ class SmallThinkerModel(TextModel):
             self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
         else:
             self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
-        # YaRN is not enabled by default
-        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
-        rope_scaling = self.hparams.get("rope_scaling") or {}
-        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
-            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
-            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
-            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
 
         sliding_window_layout = self.hparams.get("sliding_window_layout")
         if sliding_window_layout:

From 52392291b2f3a24b5d3fef4fc0b56f10db358dc1 Mon Sep 17 00:00:00 2001
From: Xuan-Son Nguyen
Date: Sun, 14 Dec 2025 22:08:10 +0100
Subject: [PATCH 20/23] preset: handle negated arg, reverse the meaning if
 needed (#18041)

---
 common/preset.cpp | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/common/preset.cpp b/common/preset.cpp
index 729c27f2cf..60746aad58 100644
--- a/common/preset.cpp
+++ b/common/preset.cpp
@@ -157,6 +157,21 @@ static std::map get_map_key_opt(common_params_context &
     return mapping;
 }
 
+static bool is_bool_arg(const common_arg & arg) {
+    return !arg.args_neg.empty();
+}
+
+static std::string parse_bool_arg(const common_arg & arg, const std::string & key, const std::string & value) {
+    // if this is a negated arg, we need to reverse the value
+    for (const auto & neg_arg : arg.args_neg) {
+        if (rm_leading_dashes(neg_arg) == key) {
+            return common_arg_utils::is_truthy(value) ? "false" : "true";
+        }
+    }
+    // otherwise, not negated
+    return value;
+}
+
 common_presets common_presets_load(const std::string & path, common_params_context & ctx_params) {
     common_presets out;
     auto key_to_opt = get_map_key_opt(ctx_params);
@@ -173,8 +188,13 @@ common_presets common_presets_load(const std::string & path, common_params_conte
         for (const auto & [key, value] : section.second) {
             LOG_DBG("option: %s = %s\n", key.c_str(), value.c_str());
             if (key_to_opt.find(key) != key_to_opt.end()) {
-                preset.options[key_to_opt[key]] = value;
-                LOG_DBG("accepted option: %s = %s\n", key.c_str(), value.c_str());
+                auto & opt = key_to_opt[key];
+                if (is_bool_arg(opt)) {
+                    preset.options[opt] = parse_bool_arg(opt, key, value);
+                } else {
+                    preset.options[opt] = value;
+                }
+                LOG_DBG("accepted option: %s = %s\n", key.c_str(), preset.options[opt].c_str());
             } else {
                 // TODO: maybe warn about unknown key?
             }
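A quick illustration of what the change above buys: once a preset key is recognized as the negated form of a boolean arg, its value is folded back into the canonical option with the meaning flipped. A rough Python model of that behavior — is_truthy is approximated here, the real parser lives in common/arg.cpp:

def is_truthy(value: str) -> bool:
    # approximation of common_arg_utils::is_truthy, for illustration only
    return value.strip().lower() in ("1", "true", "on", "yes")

def normalize_bool_option(key: str, value: str, negated_keys: set) -> str:
    # mirrors parse_bool_arg: a negated key stores the reversed value
    if key in negated_keys:
        return "false" if is_truthy(value) else "true"
    return value

# a preset line "no-mmap = true" ends up stored as the mmap option set to false:
print(normalize_bool_option("no-mmap", "true", {"no-mmap"}))  # -> false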
"false" : "true"; + } + } + // otherwise, not negated + return value; +} + common_presets common_presets_load(const std::string & path, common_params_context & ctx_params) { common_presets out; auto key_to_opt = get_map_key_opt(ctx_params); @@ -173,8 +188,13 @@ common_presets common_presets_load(const std::string & path, common_params_conte for (const auto & [key, value] : section.second) { LOG_DBG("option: %s = %s\n", key.c_str(), value.c_str()); if (key_to_opt.find(key) != key_to_opt.end()) { - preset.options[key_to_opt[key]] = value; - LOG_DBG("accepted option: %s = %s\n", key.c_str(), value.c_str()); + auto & opt = key_to_opt[key]; + if (is_bool_arg(opt)) { + preset.options[opt] = parse_bool_arg(opt, key, value); + } else { + preset.options[opt] = value; + } + LOG_DBG("accepted option: %s = %s\n", key.c_str(), preset.options[opt].c_str()); } else { // TODO: maybe warn about unknown key? } From 745fa0e78b3d3fd3fd12a260c47300dc3869d07e Mon Sep 17 00:00:00 2001 From: piDack <104877312+piDack@users.noreply.github.com> Date: Mon, 15 Dec 2025 10:18:46 +0800 Subject: [PATCH 21/23] model : add glm-asr support (#17901) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [model] add glm-asr support * fix format for ci * fix convert format for ci * update glm_asr convert script & use build_ffn for glm_asr clip & use build_stack for padding and review * check root architecture for convert hf script * fix conficlt with upstream * fix convert script for glm asr & format clip-impl * format * restore hparams text * improved conversion --------- Co-authored-by: Sigbjørn Skjæret --- convert_hf_to_gguf.py | 86 ++++++++++++++++++++++++++++++- gguf-py/gguf/constants.py | 1 + tools/mtmd/clip-graph.h | 4 ++ tools/mtmd/clip-impl.h | 2 + tools/mtmd/clip-model.h | 1 + tools/mtmd/clip.cpp | 60 ++++++++++++++++++++- tools/mtmd/models/whisper-enc.cpp | 19 ++++--- 7 files changed, 160 insertions(+), 13 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 3f861f2a6a..ee02cdd91c 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -713,6 +713,9 @@ class ModelBase: if "llm_config" in config: # rename for InternVL config["text_config"] = config["llm_config"] + if "lm_config" in config: + # rename for GlmASR + config["text_config"] = config["lm_config"] if "thinker_config" in config: # rename for Qwen2.5-Omni config["text_config"] = config["thinker_config"]["text_config"] @@ -1529,6 +1532,21 @@ class TextModel(ModelBase): raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported") self.gguf_writer.add_pooling_type(pooling_type) + def _set_vocab_glmedge(self): + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(self.dir_model) + special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True) + tokens, toktypes, tokpre = self.get_vocab_base() + self.gguf_writer.add_tokenizer_model("gpt2") + self.gguf_writer.add_tokenizer_pre(tokpre) + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_types(toktypes) + special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"]) + special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"]) + special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"]) + special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"]) + special_vocab.add_to_gguf(self.gguf_writer) + def _set_vocab_interns1(self): tokens: list[str] = [] toktypes: list[int] = [] @@ 
@@ -1658,7 +1676,7 @@ class MmprojModel(ModelBase):
     preprocessor_config: dict[str, Any]
     global_config: dict[str, Any]
 
-    n_block_keys = ["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"]
+    n_block_keys = ["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth", "encoder_layers"]
 
     has_vision_encoder: bool = True # by default
     has_audio_encoder: bool = False
@@ -1734,7 +1752,8 @@ class MmprojModel(ModelBase):
         return self.global_config.get(config_name)
 
     def get_audio_config(self) -> dict[str, Any] | None:
-        return self.global_config.get("audio_config")
+        mm_config_key = "whisper_config" if "whisper_config" in self.hparams else "audio_config"
+        return self.global_config.get(mm_config_key)
 
     def set_type(self):
         self.gguf_writer.add_type(gguf.GGUFType.MMPROJ)
@@ -2372,8 +2391,13 @@ class LlamaModel(TextModel):
         # fix for SmolVLM2, missing `num_attention_heads` in config.json
         if self.hf_arch == "VLlama3ForCausalLM":
             self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32)
+        hparams = ModelBase.load_hparams(self.dir_model, is_mistral_format=False)
+        self.origin_hf_arch = hparams.get('architectures', [None])[0]
 
     def set_vocab(self):
+        if self.origin_hf_arch == "GlmasrModel":
+            return self._set_vocab_glmedge()
+
         if self.is_mistral_format:
             return self._set_vocab_mistral()
 
@@ -2444,6 +2468,7 @@ class LlamaModel(TextModel):
             "vision_language_adapter.",
             "patch_merger.",
             "pre_mm_projector_norm",
+            "audio_encoder.",
         ]
 
         is_multimodal_tensor = "vision_tower" in name \
@@ -8846,6 +8871,63 @@ class UltravoxModel(TextModel):
         raise NotImplementedError("Ultravox does not have text decoder. Instead, it uses Llama or other models for text. If you want to get the audio encoder, please use --mmproj argument")
 
 
+@ModelBase.register("GlmasrModel")
+class GlmASRWhisperEncoderModel(MmprojModel):
+    has_vision_encoder = False
+    has_audio_encoder = True
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if "hidden_size" not in self.hparams and "intermediate_size" not in self.hparams:
+            self.hparams["hidden_size"] = self.hparams["d_model"]
+            self.hparams["intermediate_size"] = self.hparams["encoder_ffn_dim"]
+            self.hparams["num_attention_heads"] = self.hparams["encoder_attention_heads"]
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.GLMA)
+        self.gguf_writer.add_audio_num_mel_bins(self.hparams["num_mel_bins"])
+        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
+        self.gguf_writer.add_audio_stack_factor(self.global_config["merge_factor"])
+
+    def tensor_force_quant(self, name, new_name, bid, n_dims):
+        if ".conv" in name and ".weight" in name:
+            return gguf.GGMLQuantizationType.F16
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid # unused
+
+        if name.startswith("model.") or name.startswith("lm_head."):
+            # skip language model tensors
+            return []
+
+        if name.startswith("audio_encoder.whisper."):
+            name = name.replace("audio_encoder.whisper.","audio_tower.")
+        if "audio_encoder.layer_norm." in name or "audio_encoder.proj." in name:
+            name = name.replace("audio_encoder.", "audio_encoder.adapting.")
+
+        if name.startswith("audio_encoder.audio_bos_eos_token."):
+            return [(self.map_tensor_name("model.vision.boi"), data_torch[0]), (self.map_tensor_name("model.vision.eoi"), data_torch[1])]
+
+        if name.startswith("audio_encoder.adapting."):
+            name = name.replace("audio_encoder.adapting.","audio.multi_modal_projector.")
+            if ".layer_norm." in name:
+                name = name.replace(".layer_norm.", ".ln_pre.")
+            if ".0." in name:
+                name = name.replace(".0.", ".linear_1.")
+            if ".2." in name:
+                name = name.replace(".2.", ".linear_2.")
+            if ".proj." in name:
+                return []
+
+        if "conv1.bias" in name or "conv2.bias" in name:
+            # transpose conv1 and conv2 bias
+            data_torch = data_torch.unsqueeze(-1)
+
+        return [(self.map_tensor_name(name), data_torch)]
+
+
 @ModelBase.register("Qwen2AudioForConditionalGeneration")
 class WhisperEncoderModel(MmprojModel):
     has_vision_encoder = False # no vision encoder
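To make the renaming in modify_tensors above concrete, here is a hypothetical trace for one adapter tensor — the starting name is illustrative, not taken from a real checkpoint:

name = "audio_encoder.layer_norm.weight"
name = name.replace("audio_encoder.", "audio_encoder.adapting.")             # adapter tensor detected
name = name.replace("audio_encoder.adapting.", "audio.multi_modal_projector.")
name = name.replace(".layer_norm.", ".ln_pre.")
print(name)  # audio.multi_modal_projector.ln_pre.weight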
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 2b8489c591..8ef4a23a10 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -3320,6 +3320,7 @@ class VisionProjectorType:
     ULTRAVOX = "ultravox"
     INTERNVL = "internvl"
     QWEN2A = "qwen2a" # audio
+    GLMA = "glma" # audio
     QWEN25O = "qwen2.5o" # omni
     VOXTRAL = "voxtral"
     LFM2 = "lfm2"
diff --git a/tools/mtmd/clip-graph.h b/tools/mtmd/clip-graph.h
index 6d303b4e48..17f90e8aa8 100644
--- a/tools/mtmd/clip-graph.h
+++ b/tools/mtmd/clip-graph.h
@@ -112,4 +112,8 @@ struct clip_graph {
     // aka pixel_shuffle / pixel_unshuffle / patch_merger (Kimi-VL)
     // support dynamic resolution
     ggml_tensor * build_patch_merge_permute(ggml_tensor * cur, int scale_factor);
+
+    // Generic function to stack frames for audio processing
+    // Abstracts out the StackAudioFrames logic used by ultravox
+    ggml_tensor * build_stack(ggml_tensor * cur, int32_t stack_factor, int32_t n_embed);
 };
diff --git a/tools/mtmd/clip-impl.h b/tools/mtmd/clip-impl.h
index 1726823ec6..9ef0d301e4 100644
--- a/tools/mtmd/clip-impl.h
+++ b/tools/mtmd/clip-impl.h
@@ -157,6 +157,7 @@ enum projector_type {
     PROJECTOR_TYPE_INTERNVL,
     PROJECTOR_TYPE_LLAMA4,
     PROJECTOR_TYPE_QWEN2A,
+    PROJECTOR_TYPE_GLMA,
     PROJECTOR_TYPE_QWEN25O, // will be replaced by QWEN2A or QWEN25VL depending on clip_ctx
     PROJECTOR_TYPE_VOXTRAL,
     PROJECTOR_TYPE_LFM2,
@@ -183,6 +184,7 @@ static std::map PROJECTOR_TYPE_NAMES = {
     { PROJECTOR_TYPE_INTERNVL, "internvl"},
     { PROJECTOR_TYPE_LLAMA4, "llama4"},
     { PROJECTOR_TYPE_QWEN2A, "qwen2a"},
+    { PROJECTOR_TYPE_GLMA, "glma"},
     { PROJECTOR_TYPE_QWEN25O, "qwen2.5o"},
     { PROJECTOR_TYPE_VOXTRAL, "voxtral"},
     { PROJECTOR_TYPE_LFM2, "lfm2"},
diff --git a/tools/mtmd/clip-model.h b/tools/mtmd/clip-model.h
index 51bcce1ebb..bdd4f63e0f 100644
--- a/tools/mtmd/clip-model.h
+++ b/tools/mtmd/clip-model.h
@@ -256,6 +256,7 @@ struct clip_model {
     ggml_tensor * conv1d_2_w = nullptr;
     ggml_tensor * conv1d_2_b = nullptr;
     ggml_tensor * mm_norm_pre_w = nullptr;
+    ggml_tensor * mm_norm_pre_b = nullptr;
     ggml_tensor * mm_norm_mid_w = nullptr;
 
     // cogvlm
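build_stack, declared in clip-graph.h above and implemented in the clip.cpp hunk below, is the generalized StackAudioFrames: pad the flattened sequence to a multiple of n_embed * stack_factor, then reinterpret it as wider rows. A NumPy sketch of the same arithmetic, assuming a row-major [n_frames, n_embd] input:

import numpy as np

def stack_audio_frames(x: np.ndarray, stack_factor: int) -> np.ndarray:
    if stack_factor <= 1:
        return x
    n_frames, n_embd = x.shape
    stride = n_embd * stack_factor
    flat = x.reshape(-1)
    padded_len = -(-flat.size // stride) * stride  # like GGML_PAD: round up to a multiple of stride
    flat = np.pad(flat, (0, padded_len - flat.size))
    # one output row now holds stack_factor consecutive input frames
    return flat.reshape(padded_len // stride, stride)

print(stack_audio_frames(np.ones((10, 4)), 4).shape)  # (3, 16), tail zero-padded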
diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 5cc4e4b1fa..c72c6a6381 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -720,6 +720,32 @@ ggml_tensor * clip_graph::build_rope_2d(
     return cur;
 }
 
+// Generic function to stack frames for audio processing
+// Abstracts out the StackAudioFrames logic used by ultravox
+ggml_tensor * clip_graph::build_stack(ggml_tensor * cur, int32_t stack_factor, int32_t n_embed) {
+    if (stack_factor <= 1) {
+        return cur;
+    }
+
+    int64_t total_elements = ggml_nelements(cur);
+    int64_t stride = n_embed * stack_factor;
+
+    // Calculate padded length
+    int64_t padded_len = GGML_PAD(total_elements, stride);
+    int64_t pad = padded_len - total_elements;
+
+    if (pad > 0) {
+        // Pad the tensor to make it divisible by stride
+        cur = ggml_view_1d(ctx0, cur, total_elements, 0);
+        cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
+    }
+
+    // Reshape to [stride, padded_len / stride]
+    cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
+                       ggml_row_size(cur->type, stride), 0);
+    return cur;
+}
+
 // aka pixel_shuffle / pixel_unshuffle / patch_merger (Kimi-VL)
 // support dynamic resolution
 ggml_tensor * clip_graph::build_patch_merge_permute(ggml_tensor * cur, int scale_factor) {
@@ -796,6 +822,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         case PROJECTOR_TYPE_ULTRAVOX:
         case PROJECTOR_TYPE_VOXTRAL:
         case PROJECTOR_TYPE_QWEN2A:
+        case PROJECTOR_TYPE_GLMA:
             {
                 builder = std::make_unique(ctx, img);
             } break;
@@ -1136,10 +1163,12 @@ struct clip_model_loader {
                 } break;
             case PROJECTOR_TYPE_ULTRAVOX:
             case PROJECTOR_TYPE_QWEN2A:
+            case PROJECTOR_TYPE_GLMA:
            case PROJECTOR_TYPE_VOXTRAL:
                 {
                     bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX ||
-                        model.proj_type == PROJECTOR_TYPE_VOXTRAL;
+                        model.proj_type == PROJECTOR_TYPE_VOXTRAL ||
+                        model.proj_type == PROJECTOR_TYPE_GLMA;
                     get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
                     if (hparams.n_mel_bins != 128) {
                         throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
@@ -1510,6 +1539,21 @@ struct clip_model_loader {
                     model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                     model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                 } break;
+            case PROJECTOR_TYPE_GLMA:
+                {
+                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
+                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
+                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
+                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
+                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
+                    model.mm_1_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "bias"));
+                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
+                    model.mm_2_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "bias"));
+                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
+                    model.mm_norm_pre_b = get_tensor(string_format(TN_MM_NORM_PRE, "bias"));
+                    model.mm_boi = get_tensor(string_format(TN_TOK_BOI, "weight"));
+                    model.mm_eoi = get_tensor(string_format(TN_TOK_EOI, "weight"));
+                } break;
             case PROJECTOR_TYPE_LLAMA4:
                 {
                     model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
@@ -2895,6 +2939,16 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
                     n_patches /= 2;
                 }
             } break;
+        case PROJECTOR_TYPE_GLMA:
+            {
+                n_patches = img->nx;
+                // whisper downscales input token by half after conv1d
+                n_patches /= 2;
+                // reshape by merge_factor
+                n_patches /= ctx->model.hparams.proj_stack_factor;
+                // for BOI and EOI token embeddings
+                n_patches += 2;
+            } break;
         case PROJECTOR_TYPE_COGVLM:
             {
                 n_patches += 2; // for BOI and EOI token embeddings
@@ -3230,6 +3284,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         case PROJECTOR_TYPE_IDEFICS3:
         case PROJECTOR_TYPE_INTERNVL:
         case PROJECTOR_TYPE_QWEN2A:
+        case PROJECTOR_TYPE_GLMA:
        case PROJECTOR_TYPE_ULTRAVOX:
         case PROJECTOR_TYPE_LFM2:
         case PROJECTOR_TYPE_VOXTRAL:
@@ -3340,6 +3395,8 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
             return ctx->model.mm_model_proj->ne[1];
         case PROJECTOR_TYPE_QWEN2A:
             return ctx->model.mm_fc_w->ne[1];
+        case PROJECTOR_TYPE_GLMA:
+            return ctx->model.mm_2_w->ne[1];
         case PROJECTOR_TYPE_LFM2:
         case PROJECTOR_TYPE_KIMIVL:
             return ctx->model.mm_2_w->ne[1];
@@ -3386,6 +3443,7 @@ bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
 bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
     return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
         || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A
+        || ctx->proj_type() == PROJECTOR_TYPE_GLMA
         || ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL;
 }
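The GLMA branch of clip_n_output_tokens above composes three steps: the whisper conv stack halves the frame count, build_stack merges stack_factor frames into one token, and two slots are reserved for the BOI/EOI embeddings. A toy check of the arithmetic (frame count and stack factor are illustrative, not from the patch):

def glma_n_output_tokens(n_frames: int, stack_factor: int) -> int:
    n = n_frames // 2     # conv downscaling
    n //= stack_factor    # frame stacking / merge_factor
    return n + 2          # BOI and EOI token embeddings

print(glma_n_output_tokens(3000, 4))  # -> 377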
diff --git a/tools/mtmd/models/whisper-enc.cpp b/tools/mtmd/models/whisper-enc.cpp
index 07d378b095..2870d854ab 100644
--- a/tools/mtmd/models/whisper-enc.cpp
+++ b/tools/mtmd/models/whisper-enc.cpp
@@ -30,7 +30,6 @@ ggml_cgraph * clip_graph_whisper_enc::build() {
     GGML_ASSERT(model.layers[0].q_b);
     GGML_ASSERT(model.layers[0].v_b);
     GGML_ASSERT(!model.layers[0].k_b); // no bias for k
-    GGML_ASSERT(model.post_ln_w && model.post_ln_b);
 
     ggml_tensor * pos_embd_selected = ggml_view_2d(
         ctx0, model.position_embeddings,
@@ -49,15 +48,7 @@ ggml_cgraph * clip_graph_whisper_enc::build() {
     if (model.audio_has_stack_frames()) {
         // StackAudioFrames
         // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
-        int64_t stride = n_embd * hparams.proj_stack_factor;
-        int64_t padded_len = GGML_PAD(ggml_nelements(cur), stride);
-        int64_t pad = padded_len - ggml_nelements(cur);
-        if (pad > 0) {
-            cur = ggml_view_1d(ctx0, cur, ggml_nelements(cur), 0);
-            cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
-        }
-        cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
-                           ggml_row_size(cur->type, stride), 0);
+        cur = build_stack(cur, hparams.proj_stack_factor, n_embd);
         cb(cur, "after_stacked", -1);
     }
 
@@ -95,6 +86,14 @@ ggml_cgraph * clip_graph_whisper_enc::build() {
                 FFN_GELU_ERF,
                 -1);
 
+    } else if (proj_type == PROJECTOR_TYPE_GLMA) {
+        cur = ggml_norm(ctx0, cur, hparams.eps);
+        cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);
+        cur = ggml_add(ctx0, cur, model.mm_norm_pre_b);
+        cur = build_stack(cur, hparams.proj_stack_factor, n_embd);
+        cur = build_ffn(cur, model.mm_1_w, model.mm_1_b, nullptr, nullptr, model.mm_2_w, model.mm_2_b, hparams.ffn_op, 0);
+        cur = ggml_concat(ctx0, model.mm_boi, cur, 1);
+        cur = ggml_concat(ctx0, cur, model.mm_eoi, 1);
     } else {
         GGML_ABORT("%s: unknown projector type", __func__);
     }

From 4aced7a63156555911157d3002f9d3ddef4a1e55 Mon Sep 17 00:00:00 2001
From: Neo Zhang Jianyu
Date: Mon, 15 Dec 2025 10:35:15 +0800
Subject: [PATCH 22/23] [SYCL] Support gpt-oss by OPs add-id, mul_mat for
 mxfp4, swiglu_oai (#17826)

* support gpt-oss GPU by OP add-id, mul_mat for mxfp4, swiglu_oai, fix warning

* fix faulty UT case, update ops.md

* rebase, fix format issue
---
 docs/ops.md                         |   18 +-
 docs/ops/SYCL.csv                   | 1158 ++++++++++++++++++---------
 ggml/src/ggml-sycl/add-id.cpp       |   77 ++
 ggml/src/ggml-sycl/add-id.hpp       |    8 +
 ggml/src/ggml-sycl/common.hpp       |   17 +
 ggml/src/ggml-sycl/convert.cpp      |   15 +
 ggml/src/ggml-sycl/dequantize.hpp   |   18 +
 ggml/src/ggml-sycl/dpct/helper.hpp  |   59 +-
 ggml/src/ggml-sycl/element_wise.cpp |   97 +++
 ggml/src/ggml-sycl/element_wise.hpp |    4 +
 ggml/src/ggml-sycl/ggml-sycl.cpp    |   23 +-
 ggml/src/ggml-sycl/mmvq.cpp         |   22 +
 ggml/src/ggml-sycl/pad.cpp          |   10 +-
 ggml/src/ggml-sycl/ssm_conv.cpp     |    2 +-
 ggml/src/ggml-sycl/vecdotq.hpp      |   58 ++
 15 files changed, 1201 insertions(+), 385 deletions(-)
 create mode 100644 ggml/src/ggml-sycl/add-id.cpp
 create mode 100644 ggml/src/ggml-sycl/add-id.hpp
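Of the three ops named in the subject, ADD_ID is the least self-describing: it adds a bias row selected per token by an id tensor, which is how gpt-oss applies per-expert FFN biases. A simplified 2-D NumPy model of the idea — the real ggml op additionally carries a per-token expert dimension, so treat this as an assumption-laden sketch:

import numpy as np

def add_id(src0: np.ndarray, src1: np.ndarray, ids: np.ndarray) -> np.ndarray:
    # src0: [n_tokens, n_embd] activations
    # src1: [n_experts, n_embd] per-expert biases
    # ids:  [n_tokens] expert index chosen for each token
    return src0 + src1[ids]

x = np.zeros((4, 8))
b = np.arange(3 * 8).reshape(3, 8)
print(add_id(x, b, np.array([0, 2, 1, 2]))[1])  # row 1 received expert 2's bias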
diff --git a/docs/ops.md b/docs/ops.md
index 43163b39ac..b395d2315c 100644
--- a/docs/ops.md
+++ b/docs/ops.md
@@ -18,12 +18,12 @@ Legend:
 | ACC | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | ADD | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
 | ADD1 | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
-| ADD_ID | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
+| ADD_ID | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | ARANGE | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | ARGMAX | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
-| ARGSORT | ❌ | ✅ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | ❌ | ❌ | ❌ |
+| ARGSORT | ❌ | ✅ | ✅ | ✅ | ✅ | 🟡 | 🟡 | ✅ | ❌ | ❌ | ❌ |
 | CEIL | ❌ | ❌ | ✅ | 🟡 | ❌ | ❌ | 🟡 | 🟡 | ❌ | ❌ | ❌ |
-| CLAMP | ❌ | ✅ | ✅ | ✅ | 🟡 | 🟡 | 🟡 | 🟡 | ❌ | ❌ | ❌ |
+| CLAMP | ❌ | ✅ | ✅ | ✅ | 🟡 | 🟡 | ✅ | 🟡 | ❌ | ❌ | ❌ |
 | CONCAT | ❌ | ✅ | ✅ | 🟡 | ✅ | 🟡 | ✅ | ✅ | ❌ | ❌ | ❌ |
 | CONT | ❌ | 🟡 | ✅ | ✅ | ✅ | 🟡 | 🟡 | ✅ | 🟡 | ❌ | ❌ |
 | CONV_2D | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
@@ -31,7 +31,7 @@ Legend:
 | CONV_3D | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
 | CONV_TRANSPOSE_1D | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | CONV_TRANSPOSE_2D | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ |
-| COS | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | 🟡 | 🟡 | ❌ | ❌ | ❌ |
+| COS | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | ✅ | 🟡 | ❌ | ❌ | ❌ |
 | COUNT_EQUAL | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | CPY | ❌ | 🟡 | 🟡 | 🟡 | 🟡 | 🟡 | 🟡 | 🟡 | 🟡 | ❌ | ❌ |
 | CROSS_ENTROPY_LOSS | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
@@ -64,7 +64,7 @@ Legend:
 | IM2COL_3D | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ |
 | L2_NORM | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | LEAKY_RELU | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | ✅ | 🟡 | ❌ | ❌ | ❌ |
-| LOG | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | 🟡 | ✅ | ❌ | ❌ | ❌ |
+| LOG | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | MEAN | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | MUL | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
 | MUL_MAT | 🟡 | 🟡 | 🟡 | 🟡 | ✅ | 🟡 | 🟡 | 🟡 | 🟡 | 🟡 | 🟡 |
@@ -98,14 +98,14 @@ Legend:
 | SIGMOID | ❌ | ✅ | ✅ | 🟡 | 🟡 | 🟡 | ✅ | 🟡 | ✅ | ❌ | ❌ |
 | SILU | ❌ | ✅ | ✅ | 🟡 | 🟡 | 🟡 | ✅ | 🟡 | ✅ | ❌ | ❌ |
 | SILU_BACK | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ |
-| SIN | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | 🟡 | 🟡 | ❌ | ❌ | ❌ |
+| SIN | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | ✅ | 🟡 | ❌ | ❌ | ❌ |
 | SOFTCAP | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
 | SOFTPLUS | ❌ | ❌ | ✅ | 🟡 | 🟡 | ❌ | ❌ | 🟡 | ❌ | ❌ | ❌ |
 | SOFT_MAX | ❌ | 🟡 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
 | SOFT_MAX_BACK | ❌ | ❌ | 🟡 | 🟡 | ❌ | ❌ | 🟡 | ✅ | ❌ | ❌ | ❌ |
 | SOLVE_TRI | ❌ | ❌ | ✅ | 🟡 | ❌ | ❌ | ❌ | 🟡 | ❌ | ❌ | ❌ |
-| SQR | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | 🟡 | 🟡 | ❌ | ❌ | ❌ |
-| SQRT | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | 🟡 | 🟡 | ❌ | ❌ | ❌ |
+| SQR | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | 🟡 | ❌ | ❌ | ❌ |
+| SQRT | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | 🟡 | ❌ | ❌ | ❌ |
 | SSM_CONV | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | SSM_SCAN | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | 🟡 | ❌ | ❌ | ❌ |
 | STEP | ❌ | ✅ | ✅ | 🟡 | 🟡 | ❌ | ✅ | 🟡 | ✅ | ❌ | ❌ |
@@ -113,7 +113,7 @@
 | SUM | ❌ | ✅ | ✅ | 🟡 | 🟡 | ❌ | 🟡 | 🟡 | ❌ | ❌ | ❌ |
 | SUM_ROWS | ❌ | ✅ | ✅ | 🟡 | ✅ | 🟡 | 🟡 | ✅ | ❌ | ❌ | ❌ |
 | SWIGLU | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | 🟡 | ✅ | ❌ | ❌ |
-| SWIGLU_OAI | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | 🟡 | ✅ | ❌ | ❌ |
+| SWIGLU_OAI | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | 🟡 | ✅ | ❌ | ❌ |
 | TANH | ❌ | ✅ | ✅ | 🟡 | 🟡 | ✅ | ✅ | 🟡 | ✅ | ❌ | ❌ |
 | TIMESTEP_EMBEDDING | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
 | TOP_K | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | ❌ | 🟡 | ❌ | ❌ | ❌ |
diff --git a/docs/ops/SYCL.csv b/docs/ops/SYCL.csv
index 85a45d6ae0..91b442bde8 100644
---
a/docs/ops/SYCL.csv +++ b/docs/ops/SYCL.csv @@ -33,14 +33,14 @@ "SYCL0","SOFTPLUS","type=f16,ne_a=[5,7,11,13],v=0","support","0","no","SYCL" "SYCL0","GELU_ERF","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" "SYCL0","GELU_ERF","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" -"SYCL0","FLOOR","type=f16,ne_a=[128,2,2,2],v=0","support","0","no","SYCL" -"SYCL0","FLOOR","type=f16,ne_a=[5,7,11,13],v=0","support","0","no","SYCL" -"SYCL0","CEIL","type=f16,ne_a=[128,2,2,2],v=0","support","0","no","SYCL" -"SYCL0","CEIL","type=f16,ne_a=[5,7,11,13],v=0","support","0","no","SYCL" -"SYCL0","ROUND","type=f16,ne_a=[128,2,2,2],v=0","support","0","no","SYCL" -"SYCL0","ROUND","type=f16,ne_a=[5,7,11,13],v=0","support","0","no","SYCL" -"SYCL0","TRUNC","type=f16,ne_a=[128,2,2,2],v=0","support","0","no","SYCL" -"SYCL0","TRUNC","type=f16,ne_a=[5,7,11,13],v=0","support","0","no","SYCL" +"SYCL0","FLOOR","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","FLOOR","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","CEIL","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","CEIL","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","ROUND","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","ROUND","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" "SYCL0","ABS","type=f16,ne_a=[128,2,2,2],v=1","support","1","yes","SYCL" "SYCL0","ABS","type=f16,ne_a=[5,7,11,13],v=1","support","1","yes","SYCL" "SYCL0","SGN","type=f16,ne_a=[128,2,2,2],v=1","support","1","yes","SYCL" @@ -287,14 +287,14 @@ "SYCL0","GEGLU_QUICK","type=f32,ne_a=[5,7,11,13],v=1,swapped=1","support","1","yes","SYCL" "SYCL0","GEGLU_QUICK","type=f32,ne_a=[128,2,2,2],v=1,split","support","1","yes","SYCL" "SYCL0","GEGLU_QUICK","type=f32,ne_a=[5,7,11,13],v=1,split","support","1","yes","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=0.500000,limit=2.000000","support","0","no","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=0.500000,limit=7.000000","support","0","no","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=1.702000,limit=2.000000","support","0","no","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=1.702000,limit=7.000000","support","0","no","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=0.500000,limit=2.000000","support","0","no","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=0.500000,limit=7.000000","support","0","no","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=1.702000,limit=2.000000","support","0","no","SYCL" -"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=1.702000,limit=7.000000","support","0","no","SYCL" +"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=0.500000,limit=2.000000","support","1","yes","SYCL" +"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=0.500000,limit=7.000000","support","1","yes","SYCL" +"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=1.702000,limit=2.000000","support","1","yes","SYCL" +"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=0,alpha=1.702000,limit=7.000000","support","1","yes","SYCL" +"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=0.500000,limit=2.000000","support","1","yes","SYCL" +"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=0.500000,limit=7.000000","support","1","yes","SYCL" 
+"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=1.702000,limit=2.000000","support","1","yes","SYCL" +"SYCL0","SWIGLU_OAI","type=f32,ne_a=[128,2,2,2],v=1,alpha=1.702000,limit=7.000000","support","1","yes","SYCL" "SYCL0","GET_ROWS","type=f32,n=76800,m=5,r=4,be1=1,be2=2,v=0","support","1","yes","SYCL" "SYCL0","GET_ROWS","type=f32,n=256,m=80000,r=70000,be1=2,be2=1,v=0","support","1","yes","SYCL" "SYCL0","GET_ROWS","type=f32,n=256,m=5,r=4,be1=700,be2=100,v=0","support","1","yes","SYCL" @@ -4964,6 +4964,7 @@ "SYCL0","CONV_TRANSPOSE_1D","ne_input=[2,1,1,1],ne_kernel=[3,1,1,1],s0=1,p0=0,d0=1","support","1","yes","SYCL" "SYCL0","CONV_TRANSPOSE_2D","ne_input=[3,2,3,1],ne_kernel=[2,2,1,3],stride=1","support","0","no","SYCL" "SYCL0","CONV_TRANSPOSE_2D","ne_input=[10,10,9,1],ne_kernel=[3,3,1,9],stride=2","support","0","no","SYCL" +"SYCL0","CONV_TRANSPOSE_2D","ne_input=[129,63,35,1],ne_kernel=[3,3,48,35],stride=1","support","0","no","SYCL" "SYCL0","COUNT_EQUAL","type=f32,ne=[4,500,1,1]","support","1","yes","SYCL" "SYCL0","COUNT_EQUAL","type=f32,ne=[4,5000,1,1]","support","1","yes","SYCL" "SYCL0","ARGMAX","type=f32,ne=[32,1,1,1]","support","1","yes","SYCL" @@ -5419,17 +5420,45 @@ "SYCL0","CPY","type_src=f16,type_dst=f16,ne=[256,4,1,1],permute_src=[0,0,0,0],permute_dst=[0,0,0,0],_src_transpose=1","support","1","yes","SYCL" "SYCL0","CPY","type_src=f32,type_dst=f32,ne=[256,4,1,1],permute_src=[0,0,0,0],permute_dst=[0,0,0,0],_src_transpose=1","support","1","yes","SYCL" "SYCL0","CPY","type_src=bf16,type_dst=bf16,ne=[256,4,1,1],permute_src=[0,0,0,0],permute_dst=[0,0,0,0],_src_transpose=1","support","0","no","SYCL" +"SYCL0","CPY","type_src=i32,type_dst=i32,ne=[256,4,1,1],permute_src=[0,0,0,0],permute_dst=[0,0,0,0],_src_transpose=1","support","0","no","SYCL" +"SYCL0","CPY","type_src=i32,type_dst=i32,ne=[256,1,4,1],permute_src=[1,2,0,3],permute_dst=[0,0,0,0],_src_transpose=0","support","0","no","SYCL" "SYCL0","CPY","type_src=f32,type_dst=f32,ne=[256,1,4,1],permute_src=[1,2,0,3],permute_dst=[0,0,0,0],_src_transpose=0","support","1","yes","SYCL" -"SYCL0","CONT","type=f32,ne=[10,10,10,1]","support","1","yes","SYCL" -"SYCL0","CONT","type=f32,ne=[2,1,1,1]","support","1","yes","SYCL" -"SYCL0","CONT","type=f32,ne=[2,1,3,5]","support","1","yes","SYCL" -"SYCL0","CONT","type=f32,ne=[2,3,5,7]","support","1","yes","SYCL" -"SYCL0","CONT","type=f16,ne=[2,1,1,1]","support","1","yes","SYCL" -"SYCL0","CONT","type=f16,ne=[2,1,3,5]","support","1","yes","SYCL" -"SYCL0","CONT","type=f16,ne=[2,3,5,7]","support","1","yes","SYCL" -"SYCL0","CONT","type=bf16,ne=[2,1,1,1]","support","0","no","SYCL" -"SYCL0","CONT","type=bf16,ne=[2,1,3,5]","support","0","no","SYCL" -"SYCL0","CONT","type=bf16,ne=[2,3,5,7]","support","0","no","SYCL" +"SYCL0","CONT","type=f32,ne=[2,1,1,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[2,1,3,5],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[2,3,5,7],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[1,4,4,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[1,8,17,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[10,10,10,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[2,1,1,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[2,1,3,5],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[2,3,5,7],use_view_slice=0","support","1","yes","SYCL" 
+"SYCL0","CONT","type=f32,ne=[1,4,4,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[1,8,17,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f32,ne=[10,10,10,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[2,1,1,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[2,1,3,5],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[2,3,5,7],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[1,4,4,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[1,8,17,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[10,10,10,1],use_view_slice=1","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[2,1,1,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[2,1,3,5],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[2,3,5,7],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[1,4,4,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[1,8,17,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=i32,ne=[10,10,10,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f16,ne=[2,1,1,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f16,ne=[2,1,3,5],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f16,ne=[2,3,5,7],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f16,ne=[1,4,4,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f16,ne=[1,8,17,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=f16,ne=[10,10,10,1],use_view_slice=0","support","1","yes","SYCL" +"SYCL0","CONT","type=bf16,ne=[2,1,1,1],use_view_slice=0","support","0","no","SYCL" +"SYCL0","CONT","type=bf16,ne=[2,1,3,5],use_view_slice=0","support","0","no","SYCL" +"SYCL0","CONT","type=bf16,ne=[2,3,5,7],use_view_slice=0","support","0","no","SYCL" +"SYCL0","CONT","type=bf16,ne=[1,4,4,1],use_view_slice=0","support","0","no","SYCL" +"SYCL0","CONT","type=bf16,ne=[1,8,17,1],use_view_slice=0","support","0","no","SYCL" +"SYCL0","CONT","type=bf16,ne=[10,10,10,1],use_view_slice=0","support","0","no","SYCL" "SYCL0","ADD","type=f16,ne=[1,1,8,1],nr=[1,1,1,1],nf=1","support","1","yes","SYCL" "SYCL0","SUB","type=f16,ne=[1,1,8,1],nr=[1,1,1,1],nf=1","support","1","yes","SYCL" "SYCL0","MUL","type=f16,ne=[1,1,8,1],nr=[1,1,1,1],nf=1","support","1","yes","SYCL" @@ -5655,6 +5684,7 @@ "SYCL0","MUL","type=f32,ne=[64,262144,1,1],nr=[1,1,1,1],nf=1","support","1","yes","SYCL" "SYCL0","DIV","type=f32,ne=[64,262144,1,1],nr=[1,1,1,1],nf=1","support","1","yes","SYCL" "SYCL0","ADD1","type=f32,ne=[10,5,4,3]","support","1","yes","SYCL" +"SYCL0","ADD1","type=f32,ne=[1024,1024,1,1]","support","1","yes","SYCL" "SYCL0","SCALE","type=f32,ne=[10,10,10,10],scale=2.000000,bias=0.000000,inplace=0","support","1","yes","SYCL" "SYCL0","SCALE","type=f32,ne=[10,10,10,10],scale=2.000000,bias=1.000000,inplace=0","support","1","yes","SYCL" "SYCL0","SCALE","type=f32,ne=[10,10,10,10],scale=2.000000,bias=1.000000,inplace=1","support","1","yes","SYCL" @@ -5791,15 +5821,15 @@ "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=7,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" 
"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=9,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=2,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=3,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=4,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=5,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=6,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=7,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=9,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=2,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=3,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=4,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=5,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=6,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=7,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=9,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q2_K,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q2_K,type_b=f32,m=16,n=2,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q2_K,type_b=f32,m=16,n=3,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -5944,15 +5974,15 @@ "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[1,1],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" @@ -5971,15 +6001,15 @@ "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6002,15 +6032,15 @@ "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" 
+"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[1,1],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" @@ -6029,15 +6059,15 @@ "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" 
+"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=4,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6176,15 +6206,15 @@ "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" 
+"SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6207,15 +6237,15 @@ "SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" 
"SYCL0","MUL_MAT","type_a=q8_0,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6238,15 +6268,15 @@ "SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6269,15 +6299,15 @@ "SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" 
"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_0,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6300,15 +6330,15 @@ "SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6331,15 +6361,15 @@ "SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_1,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6362,15 +6392,15 @@ "SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6393,81 +6423,81 @@ "SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" 
+"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=q4_K,type_b=f16,m=16,n=8,k=256,bs=[1536,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[1,1],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[1536,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f32,m=16,n=8,k=256,bs=[1536,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[1,1],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" 
"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[1,1],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[3,2],nr=[2,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[1536,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=mxfp4,type_b=f16,m=16,n=8,k=256,bs=[1536,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[2,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,2],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" @@ -6489,12 +6519,12 @@ "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" 
+"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f32,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6520,12 +6550,12 @@ "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=1,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=8,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,1,3,2],k_v=0,o=1","support","1","yes","SYCL" +"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=16,k=256,bs=[2,3],nr=[1,1],per=[0,3,2,1],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=1,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=8,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" 
"SYCL0","MUL_MAT","type_a=iq2_xxs,type_b=f16,m=16,n=16,k=1024,bs=[3,2],nr=[1,1],per=[0,1,2,3],k_v=0,o=1","support","1","yes","SYCL" @@ -6807,145 +6837,145 @@ "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1056,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=128,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1056,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1056,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1057,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1057,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1057,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1056,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1056,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1056,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1057,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1057,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" 
"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1057,bs=[2,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1056,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1056,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1056,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1057,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1057,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1057,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1056,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1056,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1056,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1057,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1057,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" 
+"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[2,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1057,bs=[2,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1056,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1056,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1056,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1057,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1057,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1057,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1056,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1056,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1056,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1057,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1057,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" 
-"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1057,bs=[4,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1056,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1056,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1056,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1057,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1057,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1057,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1056,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1056,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1056,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1057,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" 
"SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1057,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[4,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1057,bs=[4,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1056,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1056,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1056,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1057,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1057,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1057,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1056,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1056,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1056,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1057,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" 
"SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1057,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[1,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1057,bs=[8,3],nr=[1,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1056,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1056,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1056,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=128,n=1,k=1057,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=128,n=1,k=1057,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1056,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=128,n=1,k=1057,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1056,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1056,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=128,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1056,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2112,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" 
"SYCL0","MUL_MAT","type_a=f16,type_b=f32,m=129,n=1,k=1057,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" "SYCL0","MUL_MAT","type_a=bf16,type_b=f32,m=129,n=1,k=1057,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","0","no","SYCL" -"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","0","no","SYCL" +"SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=1057,n=1,k=129,bs=[8,3],nr=[4,1],per=[0,2,1,3],k_v=0,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT","type_a=f32,type_b=f32,m=129,n=1,k=1057,bs=[8,3],nr=[4,1],per=[0,1,2,3],k_v=2113,o=1","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=f16,type_b=f32,n_mats=16,n_used=16,b=0,m=32,n=1024,k=16","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=f16,type_b=f32,n_mats=2,n_used=2,b=0,m=32,n=8192,k=64","support","1","yes","SYCL" @@ -6954,7 +6984,7 @@ "SYCL0","MUL_MAT_ID","type_a=f16,type_b=f32,n_mats=2,n_used=2,b=1,m=32,n=8192,k=64","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=f16,type_b=f32,n_mats=16,n_used=16,b=1,m=50,n=200,k=64","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=f16,type_b=f32,n_mats=1,n_used=1,b=0,m=8,n=16,k=1","support","1","yes","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=32,n_used=2,b=0,m=2880,n=32,k=2880","support","0","no","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=32,n_used=2,b=0,m=2880,n=32,k=2880","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=f32,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=1,k=256","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=f32,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=4,k=256","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=f32,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=5,k=256","support","1","yes","SYCL" @@ -7387,78 +7417,78 @@ "SYCL0","MUL_MAT_ID","type_a=q4_K,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=17,k=256","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=q4_K,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=32,k=256","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=q4_K,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=129,k=256","support","1","yes","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=32,k=256","support","0","no","SYCL" 
-"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=129,k=256","support","0","no","SYCL" 
-"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=129,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=1,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=4,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=5,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=17,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=32,k=256","support","0","no","SYCL" -"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=129,k=256","support","0","no","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=1,k=256","support","1","yes","SYCL" 
+"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=1,b=1,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=0,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=2,b=1,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=0,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=4,k=256","support","1","yes","SYCL" 
+"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=4,n_used=4,b=1,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=0,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=1,b=1,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=0,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=2,b=1,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=5,k=256","support","1","yes","SYCL" 
+"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=0,m=512,n=129,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=1,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=4,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=5,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=17,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=32,k=256","support","1","yes","SYCL" +"SYCL0","MUL_MAT_ID","type_a=mxfp4,type_b=f32,n_mats=8,n_used=4,b=1,m=512,n=129,k=256","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=iq2_xxs,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=1,k=256","support","1","yes","SYCL" "SYCL0","MUL_MAT_ID","type_a=iq2_xxs,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=4,k=256","support","0","no","SYCL" "SYCL0","MUL_MAT_ID","type_a=iq2_xxs,type_b=f32,n_mats=4,n_used=1,b=0,m=512,n=5,k=256","support","0","no","SYCL" @@ -8589,64 +8619,68 @@ "SYCL0","OUT_PROD","type_a=iq2_xxs,type_b=f16,m=256,n=16,k=16,bs=[3,3],nr=[1,2],trans_b=0","support","0","no","SYCL" "SYCL0","OUT_PROD","type_a=iq2_xxs,type_b=f16,m=256,n=16,k=16,bs=[3,3],nr=[2,1],trans_b=0","support","0","no","SYCL" "SYCL0","OUT_PROD","type_a=iq2_xxs,type_b=f16,m=256,n=16,k=16,bs=[3,3],nr=[2,2],trans_b=0","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=1,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=1,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=1,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=1,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=1,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=1,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=2,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=2,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=2,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=2,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=2,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=2,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=4,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=4,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=4,n_token=129","support","0","no","SYCL" 
-"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=4,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=4,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=4,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=1,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=1,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=1,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=1,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=1,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=1,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=2,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=2,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=2,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=2,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=2,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=2,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=4,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=4,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=4,n_token=129","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=4,n_token=1","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=4,n_token=32","support","0","no","SYCL" -"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=4,n_token=129","support","0","no","SYCL" -"SYCL0","SQR","type=f16,ne=[10,5,4,3]","support","0","no","SYCL" -"SYCL0","SQRT","type=f16,ne=[10,3,3,2]","support","0","no","SYCL" -"SYCL0","LOG","type=f16,ne=[10,5,4,3]","support","0","no","SYCL" -"SYCL0","SIN","type=f16,ne=[10,2,2,2]","support","0","no","SYCL" -"SYCL0","COS","type=f16,ne=[10,2,2,2]","support","0","no","SYCL" -"SYCL0","CLAMP","type=f16,ne=[10,5,4,3],min=-0.500000,max=0.500000","support","0","no","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=1,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=1,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=1,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=1,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=1,n_token=32","support","1","yes","SYCL" 
+"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=1,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=2,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=2,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=2,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=2,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=2,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=2,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=4,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=4,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=4,n_experts_used=4,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=4,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=4,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=4,n_experts_used=4,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=1,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=1,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=1,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=1,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=1,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=1,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=2,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=2,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=2,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=2,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=2,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=2,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=4,n_token=1","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=4,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=32,n_experts=8,n_experts_used=4,n_token=129","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=4,n_token=1","support","1","yes","SYCL" 
+"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=4,n_token=32","support","1","yes","SYCL" +"SYCL0","ADD_ID","type_a=f32,type_b=f32,n_embd=129,n_experts=8,n_experts_used=4,n_token=129","support","1","yes","SYCL" +"SYCL0","SQR","type=f16,ne=[10,5,4,3]","support","1","yes","SYCL" +"SYCL0","SQRT","type=f16,ne=[10,3,3,2]","support","1","yes","SYCL" +"SYCL0","LOG","type=f16,ne=[10,5,4,3]","support","1","yes","SYCL" +"SYCL0","SIN","type=f16,ne=[10,2,2,2]","support","1","yes","SYCL" +"SYCL0","COS","type=f16,ne=[10,2,2,2]","support","1","yes","SYCL" +"SYCL0","CLAMP","type=f16,ne=[10,5,4,3],min=-0.500000,max=0.500000","support","1","yes","SYCL" "SYCL0","LEAKY_RELU","type=f16,ne_a=[10,5,4,3],negative_slope=0.100000","support","1","yes","SYCL" -"SYCL0","FLOOR","type=f16,ne=[10,2,2,2]","support","0","no","SYCL" -"SYCL0","CEIL","type=f16,ne=[10,2,2,2]","support","0","no","SYCL" -"SYCL0","ROUND","type=f16,ne=[10,2,2,2]","support","0","no","SYCL" -"SYCL0","TRUNC","type=f16,ne=[10,2,2,2]","support","0","no","SYCL" -"SYCL0","SQR","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","SQRT","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","LOG","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","SIN","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","COS","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","CLAMP","type=f16,ne=[7,1,5,3],min=-0.500000,max=0.500000","support","0","no","SYCL" +"SYCL0","FLOOR","type=f16,ne=[10,2,2,2]","support","1","yes","SYCL" +"SYCL0","CEIL","type=f16,ne=[10,2,2,2]","support","1","yes","SYCL" +"SYCL0","ROUND","type=f16,ne=[10,2,2,2]","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f16,ne=[10,2,2,2]","support","1","yes","SYCL" +"SYCL0","SQR","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","SQRT","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","LOG","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","SIN","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","COS","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","CLAMP","type=f16,ne=[7,1,5,3],min=-0.500000,max=0.500000","support","1","yes","SYCL" "SYCL0","LEAKY_RELU","type=f16,ne_a=[7,1,5,3],negative_slope=0.100000","support","1","yes","SYCL" -"SYCL0","FLOOR","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","CEIL","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","ROUND","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" -"SYCL0","TRUNC","type=f16,ne=[7,1,5,3]","support","0","no","SYCL" +"SYCL0","FLOOR","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","FLOOR","type=f16,ne=[1024,1024,1,1]","support","1","yes","SYCL" +"SYCL0","CEIL","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","CEIL","type=f16,ne=[1024,1024,1,1]","support","1","yes","SYCL" +"SYCL0","ROUND","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","ROUND","type=f16,ne=[1024,1024,1,1]","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f16,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f16,ne=[1024,1024,1,1]","support","1","yes","SYCL" "SYCL0","SQR","type=f32,ne=[10,5,4,3]","support","1","yes","SYCL" "SYCL0","SQRT","type=f32,ne=[10,3,3,2]","support","1","yes","SYCL" "SYCL0","LOG","type=f32,ne=[10,5,4,3]","support","1","yes","SYCL" @@ -8666,9 +8700,13 @@ "SYCL0","CLAMP","type=f32,ne=[7,1,5,3],min=-0.500000,max=0.500000","support","1","yes","SYCL" "SYCL0","LEAKY_RELU","type=f32,ne_a=[7,1,5,3],negative_slope=0.100000","support","1","yes","SYCL" 
"SYCL0","FLOOR","type=f32,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","FLOOR","type=f32,ne=[1024,1024,1,1]","support","1","yes","SYCL" "SYCL0","CEIL","type=f32,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","CEIL","type=f32,ne=[1024,1024,1,1]","support","1","yes","SYCL" "SYCL0","ROUND","type=f32,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","ROUND","type=f32,ne=[1024,1024,1,1]","support","1","yes","SYCL" "SYCL0","TRUNC","type=f32,ne=[7,1,5,3]","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f32,ne=[1024,1024,1,1]","support","1","yes","SYCL" "SYCL0","DIAG_MASK_INF","type=f32,ne=[10,10,1,1],n_past=5","support","1","yes","SYCL" "SYCL0","DIAG_MASK_INF","type=f32,ne=[10,10,3,1],n_past=5","support","1","yes","SYCL" "SYCL0","DIAG_MASK_INF","type=f32,ne=[10,10,3,2],n_past=5","support","1","yes","SYCL" @@ -9411,28 +9449,405 @@ "SYCL0","CONCAT","type=i32,ne_a=[11,12,13,14],ne_b_d=7,dim=2,v=3","support","1","yes","SYCL" "SYCL0","CONCAT","type=f32,ne_a=[11,12,13,14],ne_b_d=7,dim=3,v=3","support","1","yes","SYCL" "SYCL0","CONCAT","type=i32,ne_a=[11,12,13,14],ne_b_d=7,dim=3,v=3","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[3,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[4,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[7,1,1,1],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[8,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[15,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[16,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[31,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[32,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[63,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[64,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[127,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[128,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[255,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[256,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[511,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[512,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1023,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1024,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[2047,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[2048,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[4095,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[4096,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[8191,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[8192,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[16383,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[16384,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[32767,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[32768,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[65535,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[65536,1,1,1],order=0","support","0","no","SYCL" 
+"SYCL0","ARGSORT","type=f32,ne=[131071,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[131072,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[262143,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[262144,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[524287,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[524288,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1048575,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1048576,1,1,1],order=0","support","0","no","SYCL" "SYCL0","ARGSORT","type=f32,ne=[16,10,10,10],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[60,10,10,10],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[1023,2,1,3],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[1024,2,1,3],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[1025,2,1,3],order=0","support","1","yes","SYCL" -"SYCL0","ARGSORT","type=f32,ne=[16384,1,1,1],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2047,2,1,3],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2048,2,1,3],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2049,2,1,3],order=0","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2,8,8192,1],order=0","support","1","yes","SYCL" -"SYCL0","ARGSORT","type=f32,ne=[8,1,1,1],order=1","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[3,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[4,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[7,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[8,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[15,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[16,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[31,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[32,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[63,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[64,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[127,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[128,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[255,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[256,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[511,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[512,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1023,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1024,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[2047,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[2048,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[4095,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[4096,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[8191,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[8192,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[16383,1,1,1],order=0","support","1","yes","SYCL" 
+"SYCL0","ARGSORT","type=f32,ne=[16384,1,1,1],order=0","support","1","yes","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[32767,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[32768,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[65535,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[65536,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[131071,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[131072,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[262143,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[262144,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[524287,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[524288,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1048575,1,1,1],order=0","support","0","no","SYCL" +"SYCL0","ARGSORT","type=f32,ne=[1048576,1,1,1],order=0","support","0","no","SYCL" "SYCL0","ARGSORT","type=f32,ne=[16,10,10,10],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[60,10,10,10],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[1023,2,1,3],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[1024,2,1,3],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[1025,2,1,3],order=1","support","1","yes","SYCL" -"SYCL0","ARGSORT","type=f32,ne=[16384,1,1,1],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2047,2,1,3],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2048,2,1,3],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2049,2,1,3],order=1","support","1","yes","SYCL" "SYCL0","ARGSORT","type=f32,ne=[2,8,8192,1],order=1","support","1","yes","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[12,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[13,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[13,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[15,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[15,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[15,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[19,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[19,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[19,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[19,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,1,1,1],k=1,ties=0","support","0","no","SYCL" 
+"SYCL0","TOP_K","type=f32,ne=[27,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[27,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[27,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[27,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[27,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[43,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[43,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[43,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[43,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[43,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[64,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[75,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[64,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[75,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[64,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[75,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[64,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[75,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[64,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[75,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[128,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[139,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[128,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[139,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[128,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[139,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[128,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[139,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[128,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[139,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[128,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[139,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[256,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[267,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[256,1,1,1],k=2,ties=0","support","0","no","SYCL" 
+"SYCL0","TOP_K","type=f32,ne=[267,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[256,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[267,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[256,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[267,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[256,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[267,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[256,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[267,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[512,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[523,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[512,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[523,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[512,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[523,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[512,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[523,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[512,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[523,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[512,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[523,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[512,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[523,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1035,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=2,ties=0","support","0","no","SYCL" 
+"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2059,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4096,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[4107,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[8192,1,1,1],k=1023,ties=0","support","0","no","SYCL" 
+"SYCL0","TOP_K","type=f32,ne=[8203,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16395,1,2,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32768,1,1,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[32779,1,2,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=3,ties=0","support","0","no","SYCL" 
+"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65536,1,1,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[65547,1,2,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131072,1,1,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[131083,1,2,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=100,ties=0","support","0","no","SYCL" 
+"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262144,1,1,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[262155,1,2,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=100,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=500,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=1023,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524288,1,1,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[524299,1,2,1],k=9999,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,10,10,10],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[60,10,10,10],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1023,2,1,3],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,2,1,3],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1025,2,1,3],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2047,2,1,3],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,2,1,3],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2049,2,1,3],k=1,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,10,10,10],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[60,10,10,10],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1023,2,1,3],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,2,1,3],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1025,2,1,3],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2047,2,1,3],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,2,1,3],k=2,ties=0","support","0","no","SYCL" 
+"SYCL0","TOP_K","type=f32,ne=[2049,2,1,3],k=2,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,10,10,10],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[60,10,10,10],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1023,2,1,3],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,2,1,3],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1025,2,1,3],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2047,2,1,3],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,2,1,3],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2049,2,1,3],k=3,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,10,10,10],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[60,10,10,10],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1023,2,1,3],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,2,1,3],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1025,2,1,3],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2047,2,1,3],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,2,1,3],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2049,2,1,3],k=7,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16,10,10,10],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[60,10,10,10],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1023,2,1,3],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1024,2,1,3],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[1025,2,1,3],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[16384,1,1,1],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2047,2,1,3],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2048,2,1,3],k=15,ties=0","support","0","no","SYCL" +"SYCL0","TOP_K","type=f32,ne=[2049,2,1,3],k=15,ties=0","support","0","no","SYCL" "SYCL0","UPSCALE","type=f32,ne=[512,512,3,2],scale_factor=2,mode=nearest,transpose=0","support","1","yes","SYCL" "SYCL0","UPSCALE","type=f32,ne=[512,512,3,2],scale_factor=2,mode=nearest,transpose=1","support","1","yes","SYCL" "SYCL0","UPSCALE","type=f32,ne=[2,5,7,11],ne_tgt=[5,7,11,13],mode=nearest,flags=none","support","1","yes","SYCL" @@ -9445,6 +9860,10 @@ "SYCL0","UPSCALE","type=f32,ne=[512,512,3,2],scale_factor=2,mode=bicubic,transpose=1","support","0","no","SYCL" "SYCL0","UPSCALE","type=f32,ne=[2,5,7,11],ne_tgt=[5,7,11,13],mode=bicubic,flags=none","support","0","no","SYCL" "SYCL0","UPSCALE","type=f32,ne=[5,7,11,13],ne_tgt=[2,5,7,11],mode=bicubic,flags=none","support","0","no","SYCL" +"SYCL0","UPSCALE","type=f32,ne=[512,512,3,2],scale_factor=2,mode=513,transpose=0","support","0","no","SYCL" +"SYCL0","UPSCALE","type=f32,ne=[512,512,3,2],scale_factor=2,mode=513,transpose=1","support","0","no","SYCL" +"SYCL0","UPSCALE","type=f32,ne=[2,5,7,11],ne_tgt=[5,7,11,13],mode=bilinear,flags=none","support","0","no","SYCL" +"SYCL0","UPSCALE","type=f32,ne=[5,7,11,13],ne_tgt=[2,5,7,11],mode=bilinear,flags=none","support","0","no","SYCL" "SYCL0","UPSCALE","type=f32,ne=[2,5,7,11],ne_tgt=[5,7,11,13],mode=bilinear,flags=align_corners","support","0","no","SYCL" 
"SYCL0","UPSCALE","type=f32,ne=[1,4,3,2],ne_tgt=[2,8,3,2],mode=bilinear,flags=align_corners","support","0","no","SYCL" "SYCL0","UPSCALE","type=f32,ne=[4,1,3,2],ne_tgt=[1,1,3,2],mode=bilinear,flags=align_corners","support","0","no","SYCL" @@ -9479,9 +9898,23 @@ "SYCL0","PAD_REFLECT_1D","type=f32,ne_a=[3000,384,4,1],pad_0=10,pad_1=9","support","1","yes","SYCL" "SYCL0","ROLL","shift0=3,shift1=-2,shift3=1,shift4=-1","support","1","yes","SYCL" "SYCL0","ARANGE","type=f32,start=0.000000,stop=10.000000,step=1.000000","support","1","yes","SYCL" +"SYCL0","ARANGE","type=f32,start=0.000000,stop=1048576.000000,step=1.000000","support","1","yes","SYCL" "SYCL0","TIMESTEP_EMBEDDING","type=f32,ne_a=[2,1,1,1],dim=320,max_period=10000","support","1","yes","SYCL" "SYCL0","LEAKY_RELU","type=f32,ne_a=[10,5,4,3],negative_slope=0.100000","support","1","yes","SYCL" "SYCL0","CUMSUM","type=f32,ne=[10,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[127,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[128,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[128,128,4,4]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[255,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[256,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[511,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[512,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[1023,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[1024,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[2047,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[2048,5,4,3]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[242004,1,1,1]","support","0","no","SYCL" +"SYCL0","CUMSUM","type=f32,ne=[375960,1,1,1]","support","0","no","SYCL" "SYCL0","XIELU","type=f32,ne=[10,5,4,3]","support","0","no","SYCL" "SYCL0","TRI","type=f32,ne=[10,10,4,3],tri_type=3","support","0","no","SYCL" "SYCL0","TRI","type=f32,ne=[10,10,4,3],tri_type=2","support","0","no","SYCL" @@ -9490,6 +9923,7 @@ "SYCL0","FILL","type=f32,ne=[10,10,4,3],c=0.000000","support","0","no","SYCL" "SYCL0","FILL","type=f32,ne=[303,207,11,3],c=2.000000","support","0","no","SYCL" "SYCL0","FILL","type=f32,ne=[800,600,4,4],c=-152.000000","support","0","no","SYCL" +"SYCL0","FILL","type=f32,ne=[2048,512,2,2],c=3.500000","support","0","no","SYCL" "SYCL0","SOLVE_TRI","type=f32,ne_lhs=[10,10,4,3],ne_rhs=[3,10,4,3]","support","0","no","SYCL" "SYCL0","SOLVE_TRI","type=f32,ne_lhs=[11,11,1,1],ne_rhs=[5,11,1,1]","support","0","no","SYCL" "SYCL0","SOLVE_TRI","type=f32,ne_lhs=[17,17,2,4],ne_rhs=[9,17,2,4]","support","0","no","SYCL" @@ -9497,6 +9931,8 @@ "SYCL0","SOLVE_TRI","type=f32,ne_lhs=[42,42,5,2],ne_rhs=[10,42,5,2]","support","0","no","SYCL" "SYCL0","SOLVE_TRI","type=f32,ne_lhs=[64,64,2,2],ne_rhs=[10,64,2,2]","support","0","no","SYCL" "SYCL0","SOLVE_TRI","type=f32,ne_lhs=[100,100,4,4],ne_rhs=[41,100,4,4]","support","0","no","SYCL" +"SYCL0","SOLVE_TRI","type=f32,ne_lhs=[128,128,4,4],ne_rhs=[31,128,4,4]","support","0","no","SYCL" +"SYCL0","SOLVE_TRI","type=f32,ne_lhs=[64,64,4,4],ne_rhs=[300,64,4,4]","support","0","no","SYCL" "SYCL0","PAD","type=f32,ne_a=[512,512,1,1],lp0=0,rp0=1,lp1=0,rp1=1,lp2=0,rp2=0,lp3=0,rp3=0,v=0","support","1","yes","SYCL" "SYCL0","PAD","type=f32,ne_a=[11,22,33,44],lp0=1,rp0=2,lp1=3,rp1=4,lp2=5,rp2=6,lp3=7,rp3=8,v=0","support","1","yes","SYCL" 
"SYCL0","PAD","type=f32,ne_a=[512,512,1,1],lp0=0,rp0=1,lp1=0,rp1=1,lp2=0,rp2=0,lp3=0,rp3=0,v=1","support","0","no","SYCL" diff --git a/ggml/src/ggml-sycl/add-id.cpp b/ggml/src/ggml-sycl/add-id.cpp new file mode 100644 index 0000000000..00c073cf93 --- /dev/null +++ b/ggml/src/ggml-sycl/add-id.cpp @@ -0,0 +1,77 @@ +#include +#include "common.hpp" +#include "add-id.hpp" + +static void add_id_kernel( + const float* src0, + const float* src1, + const int32_t* src2, + float* dst, + int64_t ne0, + int64_t ne1, + size_t nb01, + size_t nb02, + size_t nb11, + size_t nb21, + sycl::nd_item<3> item_ct1) { + const int64_t i1 = item_ct1.get_group(2); + const int64_t i2 = item_ct1.get_group(1); + + const int i11 = + *(const int32_t*)((const char*)src2 + i1 * sizeof(int32_t) + i2 * nb21); + + const size_t nb1 = ne0 * sizeof(float); + const size_t nb2 = ne1 * nb1; + + float* dst_row = (float*)((char*)dst + i1 * nb1 + i2 * nb2); + const float* src0_row = + (const float*)((const char*)src0 + i1 * nb01 + i2 * nb02); + const float* src1_row = (const float*)((const char*)src1 + i11 * nb11); + + for (int64_t i0 = item_ct1.get_local_id(2); i0 < ne0; + i0 += item_ct1.get_local_range(2)) { + dst_row[i0] = src0_row[i0] + src1_row[i0]; + } +} + +void ggml_sycl_add_id(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { + const ggml_tensor* src0 = dst->src[0]; + const ggml_tensor* src1 = dst->src[1]; + const ggml_tensor* src2 = dst->src[2]; + + GGML_TENSOR_TERNARY_OP_LOCALS + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src2->type == GGML_TYPE_I32); + + GGML_ASSERT(nb00 == sizeof(float)); + GGML_ASSERT(nb10 == sizeof(float)); + GGML_ASSERT(nb20 == sizeof(int32_t)); + + const float* src0_d = (const float*)src0->data; + const float* src1_d = (const float*)src1->data; + const int32_t* src2_d = (const int32_t*)src2->data; + float* dst_d = (float*)dst->data; + + int threads = std::min((int)ne00, 768); // cols + ctx.stream()->parallel_for( + sycl::nd_range<3>( + sycl::range<3>(1, ne02, ne01) * sycl::range<3>(1, 1, threads), + sycl::range<3>(1, 1, threads)), + [=](sycl::nd_item<3> item_ct1) { + add_id_kernel( + src0_d, + src1_d, + src2_d, + dst_d, + ne0, + ne1, + nb01, + nb02, + nb11, + nb21, + item_ct1); + }); +} diff --git a/ggml/src/ggml-sycl/add-id.hpp b/ggml/src/ggml-sycl/add-id.hpp new file mode 100644 index 0000000000..e1b09ee8c7 --- /dev/null +++ b/ggml/src/ggml-sycl/add-id.hpp @@ -0,0 +1,8 @@ +#ifndef GGML_SYCL_ADD_ID_HPP +#define GGML_SYCL_ADD_ID_HPP + +#include "common.hpp" + +void ggml_sycl_add_id(ggml_backend_sycl_context & ctx, ggml_tensor * dst); + +#endif // GGML_SYCL_ADD_ID_HPP diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index 637630c1d2..519638fd41 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -642,5 +642,22 @@ static __dpct_inline__ sycl::uint2 fast_div_modulo(uint32_t n, const sycl::uint3 return sycl::uint2(div_val, mod_val); } +static __dpct_inline__ int ggml_sycl_dp4a(const int a, const int b, int c) { + return dpct::dp4a(a, b, c); +} + +static __dpct_inline__ float ggml_sycl_e8m0_to_fp32(uint8_t x) { + uint32_t bits; + if (x == 0) { + bits = 0x00400000; + } else { + bits = (uint32_t) x << 23; + } + + float result; + memcpy(&result, &bits, sizeof(float)); + return result; +} + #endif // GGML_SYCL_COMMON_HPP diff --git a/ggml/src/ggml-sycl/convert.cpp b/ggml/src/ggml-sycl/convert.cpp index 7c6ea8a57a..8bdae36458 100644 --- 
a/ggml/src/ggml-sycl/convert.cpp +++ b/ggml/src/ggml-sycl/convert.cpp @@ -472,6 +472,16 @@ static void dequantize_row_iq4_nl_sycl(const void *vx, dst_t *y, const int64_t k } } +template +static void dequantize_row_mxfp4_sycl(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) { + const int nb = (k + QK_K - 1) / QK_K; + stream->parallel_for( + sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { + dequantize_block_mxfp4(vx, y, item_ct1); + }); +} + template static void convert_unary_nc(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t s01, const int64_t s02, const int64_t s03, @@ -518,6 +528,7 @@ static void convert_unary_sycl(const void * vx, dst_t * y, const int64_t k, dpct convert_unary_nc_sycl(vx, y, k, 1, 1, 1, k, k, k, queue); } + to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type, ggml_tensor * dst) { switch (type) { case GGML_TYPE_Q4_0: @@ -571,6 +582,8 @@ to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type, ggml_tensor * dst) { return dequantize_row_iq4_xs_sycl; case GGML_TYPE_IQ4_NL: return dequantize_row_iq4_nl_sycl; + case GGML_TYPE_MXFP4: + return dequantize_row_mxfp4_sycl; case GGML_TYPE_F32: return convert_unary_sycl; #ifdef GGML_SYCL_HAS_BF16 @@ -636,6 +649,8 @@ to_fp32_sycl_t ggml_get_to_fp32_sycl(ggml_type type, ggml_tensor *dst) { return dequantize_row_iq4_xs_sycl; case GGML_TYPE_IQ4_NL: return dequantize_row_iq4_nl_sycl; + case GGML_TYPE_MXFP4: + return dequantize_row_mxfp4_sycl; case GGML_TYPE_F16: return convert_unary_sycl; #ifdef GGML_SYCL_HAS_BF16 diff --git a/ggml/src/ggml-sycl/dequantize.hpp b/ggml/src/ggml-sycl/dequantize.hpp index 540539bb22..da2a605daa 100644 --- a/ggml/src/ggml-sycl/dequantize.hpp +++ b/ggml/src/ggml-sycl/dequantize.hpp @@ -819,5 +819,23 @@ dequantize_block_iq4_xs(const void *__restrict__ vx, dst_t *__restrict__ yy, } } +template +static void dequantize_block_mxfp4(const void * __restrict__ vx, dst_t * __restrict__ yy, + const sycl::nd_item<3> &item_ct1) { + // auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>(); + const int64_t i = item_ct1.get_group(2); + const block_mxfp4 * x = (const block_mxfp4 *) vx + i*(QK_K/QK_MXFP4); + + const int64_t tid = item_ct1.get_local_id(2); + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 4*il; + const uint8_t * q4 = x[ib].qs + 4*il; + const float d = ggml_sycl_e8m0_to_fp32(x[ib].e); + for (int j = 0; j < 4; ++j) { + y[j+ 0] = d * kvalues_mxfp4[q4[j] & 0xf]*0.5f; + y[j+16] = d * kvalues_mxfp4[q4[j] >> 4]*0.5f; + } +} #endif // GGML_SYCL_DEQUANTIZE_HPP diff --git a/ggml/src/ggml-sycl/dpct/helper.hpp b/ggml/src/ggml-sycl/dpct/helper.hpp index f93cfa701f..30ec1e8daf 100644 --- a/ggml/src/ggml-sycl/dpct/helper.hpp +++ b/ggml/src/ggml-sycl/dpct/helper.hpp @@ -1860,10 +1860,31 @@ namespace dpct : id); } + template + using dot_product_acc_t = std::conditional_t< + std::is_unsigned_v && std::is_unsigned_v, + uint32_t, + int32_t>; + + template + sycl::vec extract_and_sign_or_zero_extend4(T val) { + return sycl::vec(val) + .template as, int8_t, uint8_t>, + 4>>() + .template convert(); + } + template - inline auto dp4a(T1 a, T2 b, T3 c) - { - return syclcompat::dp4a(a, b, c); + inline auto dp4a(T1 a, T2 b, T3 c) { + dot_product_acc_t res = c; + auto va = extract_and_sign_or_zero_extend4(a); + auto vb = extract_and_sign_or_zero_extend4(b); + res += va[0] * vb[0]; + res += 
va[1] * vb[1]; + res += va[2] * vb[2]; + res += va[3] * vb[3]; + return res; } struct sub_sat @@ -2972,6 +2993,38 @@ namespace dpct atomic_fetch_add(addr, operand, memoryOrder); } + inline unsigned int byte_level_permute( + unsigned int a, unsigned int b, unsigned int s) { + unsigned int ret; + ret = ((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) | + (((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) + << 8) | + (((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) + << 16) | + (((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) + << 24); + return ret; + } + + inline uint32_t byte_level_permute_custom( + uint32_t low32, uint32_t high32, uint32_t sel, int mode = 0) { + constexpr uint16_t lookup[6][4] = { + {0x3210, 0x4321, 0x5432, 0x6543}, // Forward 4-byte extract + {0x5670, 0x6701, 0x7012, 0x0123}, // Backward 4-byte extract + {0x0000, 0x1111, 0x2222, 0x3333}, // Replicate 8-bit values + {0x3210, 0x3211, 0x3222, 0x3333}, // Edge clamp left + {0x0000, 0x1110, 0x2210, 0x3210}, // Edge clamp right + {0x1010, 0x3232, 0x1010, 0x3232} // Replicate 16-bit values + }; + + if (mode >= 1 && mode <= 6) { + return byte_level_permute(low32, high32, lookup[mode - 1][sel & 0x3]); + } else if (!mode) { + return byte_level_permute(low32, high32, sel); + } + return 0; + } + } // COPY from DPCT head files #endif // GGML_SYCL_DPCT_HELPER_HPP diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp index 7d54ce600e..8d83b2446b 100644 --- a/ggml/src/ggml-sycl/element_wise.cpp +++ b/ggml/src/ggml-sycl/element_wise.cpp @@ -911,6 +911,98 @@ static inline void ggml_sycl_op_swiglu(ggml_backend_sycl_context & ctx, ggml_ten }); } +__dpct_inline__ float ggml_sycl_op_swiglu_oai_single(float x, float g, float alpha = 1.702f, float limit = 7.0f) { + x = sycl::fmin(x, limit); + g = sycl::fmax(sycl::fmin(g, limit), -limit); + + float out_glu = x / (1.0f + sycl::native::exp(-x * alpha)); + out_glu = out_glu * (1.0f + g); + return out_glu; +} + + +template +static void swiglu_oai_kernel(const T * x, const T * g, T * dst, const int64_t k, + const int64_t n, const int64_t o0, const int64_t o1, + float alpha, float limit, sycl::nd_item<3> item_ct1) { + const int64_t i = int64_t(item_ct1.get_local_range(2)) * item_ct1.get_group(2) + item_ct1.get_local_id(2); + + if (i >= k) { + return; + } + + const int64_t j0 = (i / n) * o0 + (i % n); + const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); + + float xi = x[j0]; + float gi = g[j1]; + + dst[i] = ggml_sycl_op_swiglu_oai_single(xi, gi, alpha, limit); +} + +template +static void swiglu_oai_sycl(const T * x, + const T * g, + T * dst, + const int64_t k, + const int64_t n, + const int64_t o0, + const int64_t o1, + const float alpha, + const float limit, + dpct::queue_ptr stream) { + const int64_t num_blocks = (k + SYCL_GLU_BLOCK_SIZE - 1) / SYCL_GLU_BLOCK_SIZE; + stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GLU_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_GLU_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { + swiglu_oai_kernel(x, g, dst, k, n, o0, o1, alpha, limit, item_ct1); + }); +} + +void ggml_sycl_op_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + void * src0_d = src0->data; + void * src1_d = src1 ? src1->data : src0->data; + const int64_t src0_o = src0->nb[1]; + const int64_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; + void * dst_d = dst->data; + const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + dpct::queue_ptr stream = ctx.stream(); + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(src0->nb[0] == ggml_element_size(src0)); + GGML_ASSERT(ggml_is_contiguous(dst)); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == dst->type); + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); + GGML_ASSERT(src1->ne[0] == nc); + GGML_ASSERT(src0->type == src1->type); + } + + //const int32_t swapped = ((const int32_t *) dst->op_params)[1]; + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + const float alpha = ggml_get_op_params_f32(dst, 2); + const float limit = ggml_get_op_params_f32(dst, 3); + + float * src0_p = (float *) src0_d; + float * src1_p = (float *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + swiglu_oai_sycl(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), alpha, limit, stream); +} + static inline void ggml_sycl_op_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { @@ -1070,6 +1162,11 @@ void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_op_swiglu(ctx, dst); } +void ggml_sycl_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_swiglu_oai(ctx, dst); +} + void ggml_sycl_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_geglu_erf(ctx, dst); diff --git a/ggml/src/ggml-sycl/element_wise.hpp b/ggml/src/ggml-sycl/element_wise.hpp index fcf93295cb..0913a2e529 100644 --- a/ggml/src/ggml-sycl/element_wise.hpp +++ b/ggml/src/ggml-sycl/element_wise.hpp @@ -5,6 +5,8 @@ #include "ggml.h" #include // For std::numeric_limits +#define SYCL_GLU_BLOCK_SIZE 256 + template T neg_infinity() { return -std::numeric_limits::infinity(); @@ -41,6 +43,8 @@ void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst); +void ggml_sycl_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst); + void ggml_sycl_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 7449a91609..e996d98be8 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -39,6 +39,7 @@ #include "ggml-impl.h" #include "ggml-backend-impl.h" +#include "ggml-sycl/add-id.hpp" #include "ggml-sycl/backend.hpp" #include "ggml-sycl/common.hpp" #include "ggml-sycl/element_wise.hpp" @@ -3313,6 +3314,7 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type) && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; + // mmvq and mmq need the __dp4a instruction which is available for gen12+ // Workaround in 
https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS); @@ -3320,7 +3322,6 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE); #endif // SYCL_USE_XMX - // mmvq path is faster in the CUDA backend. if (!g_ggml_sycl_prioritize_dmmv && (ctx.stream()->get_backend() == sycl::backend::ext_oneapi_cuda // Dispatch becomes obscure with the reorder, MMVQ when the reorder optimization @@ -3711,6 +3712,9 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg case GGML_OP_ADD1: // TODO: more efficient implementation ggml_sycl_add(ctx, dst); break; + case GGML_OP_ADD_ID: + ggml_sycl_add_id(ctx, dst); + break; case GGML_OP_SUB: ggml_sycl_sub(ctx, dst); break; @@ -3803,6 +3807,9 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg case GGML_GLU_OP_SWIGLU: ggml_sycl_swiglu(ctx, dst); break; + case GGML_GLU_OP_SWIGLU_OAI: + ggml_sycl_swiglu_oai(ctx, dst); + break; case GGML_GLU_OP_GEGLU_ERF: ggml_sycl_geglu_erf(ctx, dst); break; @@ -4397,6 +4404,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_GLU_OP_REGLU: case GGML_GLU_OP_GEGLU: case GGML_GLU_OP_SWIGLU: + case GGML_GLU_OP_SWIGLU_OAI: case GGML_GLU_OP_GEGLU_ERF: case GGML_GLU_OP_GEGLU_QUICK: return ggml_is_contiguous_1(op->src[0]); @@ -4424,15 +4432,18 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g } } ggml_type src0_type = op->src[0]->type; - if (src0_type == GGML_TYPE_BF16 || src0_type == GGML_TYPE_MXFP4) { - // TODO: support MXFP4 + if (src0_type == GGML_TYPE_BF16 ) { + // TODO: support GGML_TYPE_BF16 // FIXME: keep a list of supported types to avoid breaking the backend when a new type is added return false; } + // TODO: The configuration below needs more work to be supported with oneDNN - if (ggml_is_permuted(a) && !ggml_is_contiguous(a) && a->ne[2] > 1 && a->ne[3] > 1) { - return false; + if (ggml_is_permuted(a) && !ggml_is_contiguous(a) && + a->ne[2] > 1 && a->ne[3] > 1 && src0_type == GGML_TYPE_F16) { + return false; } + // TODO: This specific configuration can fail with oneDNN and needs more debugging if (!ggml_is_permuted(a) && ggml_is_permuted(b) && b->ne[2] > 1 && b->ne[3] > 1 && a->ne[0] > 128 && a->ne[2] == 1 && src0_type == GGML_TYPE_F16) { @@ -4553,9 +4564,9 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: - return true; case GGML_OP_ADD: case GGML_OP_ADD1: + case GGML_OP_ADD_ID: case GGML_OP_SUB: case GGML_OP_COUNT_EQUAL: case GGML_OP_MUL: diff --git a/ggml/src/ggml-sycl/mmvq.cpp b/ggml/src/ggml-sycl/mmvq.cpp index 5b7f064074..316aa0d0fb 100644 --- a/ggml/src/ggml-sycl/mmvq.cpp +++ b/ggml/src/ggml-sycl/mmvq.cpp @@ -595,6 +595,25 @@ static void mul_mat_vec_q4_1_q8_1_sycl(const void *vx, const void *vy, } } +static void mul_mat_vec_mxfp4_q8_1_sycl(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, + dpct::queue_ptr stream) { + GGML_ASSERT(ncols % QK_MXFP4 == 0); + const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; + const sycl::range<3> block_nums(1, 1, block_num_y); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); + + { + stream->submit([&](sycl::handler & cgh) { + cgh.parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), 
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); + }); + } +} + + static void mul_mat_vec_q5_0_q8_1_sycl(const void *vx, const void *vy, float *dst, const int ncols, const int nrows, @@ -1123,6 +1142,9 @@ void ggml_sycl_op_mul_mat_vec_q(ggml_backend_sycl_context & ctx, const ggml_tens case GGML_TYPE_IQ4_XS: mul_mat_vec_iq4_xs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream); break; + case GGML_TYPE_MXFP4: + mul_mat_vec_mxfp4_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream); + break; default: GGML_ABORT("fatal error"); } diff --git a/ggml/src/ggml-sycl/pad.cpp b/ggml/src/ggml-sycl/pad.cpp index 413712c584..f989c5e4b8 100644 --- a/ggml/src/ggml-sycl/pad.cpp +++ b/ggml/src/ggml-sycl/pad.cpp @@ -14,10 +14,10 @@ #include "pad.hpp" static void pad_f32(const float * src, float * dst, - const int lp0, const int rp0, const int lp1, const int rp1, - const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3) { - auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>(); + const int lp0, const int rp0, const int lp1, const int rp1, + const int lp2, const int rp2, const int lp3, const int rp3, + const int ne0, const int ne1, const int ne2, const int ne3, + sycl::nd_item<3> item_ct1) { int i0 = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2); int i1 = item_ct1.get_group(1); @@ -63,7 +63,7 @@ static void pad_f32_sycl(const float *src, float *dst, const int lp0, sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { pad_f32(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, - ne2, ne3); + ne2, ne3, item_ct1); }); } diff --git a/ggml/src/ggml-sycl/ssm_conv.cpp b/ggml/src/ggml-sycl/ssm_conv.cpp index 0dc0f71c9a..eea9a73d67 100644 --- a/ggml/src/ggml-sycl/ssm_conv.cpp +++ b/ggml/src/ggml-sycl/ssm_conv.cpp @@ -88,7 +88,7 @@ void ggml_sycl_ssm_conv(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[1] == src0->ne[0] * static_cast(sizeof(float))); + GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); const int src_stride_inner = ncs; const int src_stride_seq = ncs * d_inner; diff --git a/ggml/src/ggml-sycl/vecdotq.hpp b/ggml/src/ggml-sycl/vecdotq.hpp index 4088ddb54f..43482b3672 100644 --- a/ggml/src/ggml-sycl/vecdotq.hpp +++ b/ggml/src/ggml-sycl/vecdotq.hpp @@ -20,6 +20,18 @@ typedef float (*vec_dot_q_sycl_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs); +static __dpct_inline__ int get_int_b1(const void * x, const int & i32) { + const uint8_t * x8 = (const uint8_t *) x; + + int x32 = x8[4*i32 + 0] << 0; + x32 |= x8[4*i32 + 1] << 8; + x32 |= x8[4*i32 + 2] << 16; + x32 |= x8[4*i32 + 3] << 24; + + return x32; +} + + static __dpct_inline__ int get_int_from_int8(const int8_t* x8, const int& i32) { const uint16_t* x16 = (const uint16_t*)(x8 + sizeof(int) * i32); // assume at least 2 byte @@ -75,6 +87,28 @@ static __dpct_inline__ void get_int_from_table_16(const uint32_t &q4, val2 = v1 | (v2 << 16); } +static __dpct_inline__ sycl::int2 get_int_from_table_16( + const int& q4, const int8_t* table) { + const uint32_t* table32 = (const uint32_t*)table; + uint32_t tmp[2]; + const uint32_t low_high_selection_indices = + (0x32103210 | ((q4 & 0x88888888) >> 1)); +#pragma unroll + for (uint32_t i = 0; 
i < 2; ++i) { + const uint32_t shift = 16 * i; + + const uint32_t low = + dpct::byte_level_permute(table32[0], table32[1], q4 >> shift); + const uint32_t high = + dpct::byte_level_permute(table32[2], table32[3], q4 >> shift); + tmp[i] = dpct::byte_level_permute( + low, high, low_high_selection_indices >> shift); + } + return sycl::int2( + dpct::byte_level_permute(tmp[0], tmp[1], 0x6420), + dpct::byte_level_permute(tmp[0], tmp[1], 0x7531)); +} + #define VDR_Q2_K_Q8_1_MMVQ 1 // contiguous v/x values @@ -685,6 +719,30 @@ vec_dot_q4_1_q8_1(const void *__restrict__ vbq, return vec_dot_q4_1_q8_1_impl(v, u, bq4_1->dm, bq8_1->ds); } +#define VDR_MXFP4_Q8_1_MMVQ 2 +#define VDR_MXFP4_Q8_1_MMQ 4 + +static __dpct_inline__ float vec_dot_mxfp4_q8_1(const void * __restrict__ vbq, + const block_q8_1 * __restrict__ bq8_1, + const int & iqs) { + const block_mxfp4 * bq4 = (const block_mxfp4 *) vbq; + + const int * q8 = (const int *) bq8_1->qs + iqs; + + int sumi = 0; +#pragma unroll + for (int l = 0; l < VDR_MXFP4_Q8_1_MMVQ; ++l) { + const int aux_q4 = get_int_b1(bq4->qs, iqs + l); + const sycl::int2 v = get_int_from_table_16(aux_q4, kvalues_mxfp4); + sumi = ggml_sycl_dp4a(v.x(), q8[l + 0], sumi); + sumi = ggml_sycl_dp4a(v.y(), q8[l + 4], sumi); + } + + const float d = ggml_sycl_e8m0_to_fp32(bq4->e) * 0.5f * (bq8_1->ds)[0]; + return d * sumi; +} + + static __dpct_inline__ float vec_dot_q5_0_q8_1(const void *__restrict__ vbq, const block_q8_1 *__restrict__ bq8_1, const int &iqs) { From b1f3a6e5db7b782ef077bd0e8253ce03283b1f37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Mon, 15 Dec 2025 09:24:59 +0100 Subject: [PATCH 23/23] llama: automatically set parameters not set by the user in such a way that maximizes GPU utilization (#16653) * llama: automatically fit args to free memory llama-fit-params tool * fix CI * hints for bug reports, ensure no reallocation * fix segfault with Vulkan * add llama-fit-params to CI * fix CI * fix CI * fix CI * minor adjustments * fix assignment of 1 dense layer * fix logger not being reset on model load failure * remove --n-gpu-layer hint on model load failure * fix llama-fit-params verbosity * fix edge case * fix typo [no ci] --- .github/ISSUE_TEMPLATE/011-bug-results.yml | 9 +- ci/run.sh | 6 + common/arg.cpp | 33 +- common/common.cpp | 21 +- common/common.h | 14 +- ggml/include/ggml-alloc.h | 9 + ggml/include/ggml-backend.h | 1 + ggml/include/ggml.h | 3 +- ggml/src/ggml-alloc.c | 68 ++- ggml/src/ggml-backend.cpp | 23 +- ggml/src/ggml.c | 5 + include/llama.h | 19 +- src/llama-context.cpp | 69 ++- src/llama-context.h | 10 +- src/llama-hparams.h | 1 + src/llama-impl.cpp | 4 + src/llama-kv-cache.cpp | 23 +- src/llama-model-loader.cpp | 2 + src/llama-model-loader.h | 2 + src/llama-model.cpp | 38 +- src/llama-quant.cpp | 2 +- src/llama.cpp | 650 ++++++++++++++++++++- tools/CMakeLists.txt | 1 + tools/fit-params/CMakeLists.txt | 8 + tools/fit-params/README.md | 55 ++ tools/fit-params/fit-params.cpp | 62 ++ 26 files changed, 1075 insertions(+), 63 deletions(-) create mode 100644 tools/fit-params/CMakeLists.txt create mode 100644 tools/fit-params/README.md create mode 100644 tools/fit-params/fit-params.cpp diff --git a/.github/ISSUE_TEMPLATE/011-bug-results.yml b/.github/ISSUE_TEMPLATE/011-bug-results.yml index c42a14ff83..b815e70a8d 100644 --- a/.github/ISSUE_TEMPLATE/011-bug-results.yml +++ b/.github/ISSUE_TEMPLATE/011-bug-results.yml @@ -11,7 +11,7 @@ body: (i.e. the generated text) are incorrect or llama.cpp crashes during model evaluation. 
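Stepping back to the MXFP4 support added in the patch above: the format pairs a per-block E8M0 scale (an 8-bit power-of-two exponent) with 4-bit indices into a 16-entry value table, and the reworked dpct::dp4a is a plain 4-way int8 dot product with accumulate. The following is a self-contained scalar sketch of those pieces; the table values mirror ggml's kvalues_mxfp4 (e2m1 values doubled, which is why the kernels multiply by 0.5f), everything else is hypothetical reference code, and only the signed dp4a case is shown:

    // Scalar reference for the MXFP4 path (sketch, not the SYCL kernels).
    #include <cstdint>
    #include <cstring>

    // E8M0 scale: value = 2^(e - 127); e == 0 maps to the float denormal
    // 2^-127 (bit pattern 0x00400000), matching ggml_sycl_e8m0_to_fp32.
    float e8m0_to_fp32_ref(uint8_t e) {
        const uint32_t bits = e == 0 ? 0x00400000u : (uint32_t) e << 23;
        float f;
        std::memcpy(&f, &bits, sizeof(f));
        return f;
    }

    // FP4 (e2m1) values doubled, mirroring ggml's kvalues_mxfp4 table.
    const int8_t kvalues_mxfp4_ref[16] = {
        0, 1, 2, 3, 4, 6, 8, 12, 0, -1, -2, -3, -4, -6, -8, -12,
    };

    // Decode one 4-bit code; e.g. code 0x5 with e = 127: 6 * 1.0f * 0.5f = 3.0f.
    float mxfp4_decode_ref(uint8_t code, uint8_t e) {
        return e8m0_to_fp32_ref(e) * kvalues_mxfp4_ref[code & 0xf] * 0.5f;
    }

    // dp4a fallback, signed case: 4-way int8 dot product with accumulate.
    int dp4a_ref(int a, int b, int c) {
        int8_t va[4], vb[4];
        std::memcpy(va, &a, 4);
        std::memcpy(vb, &b, 4);
        for (int i = 0; i < 4; ++i) {
            c += va[i] * vb[i];
        }
        return c;
    }
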
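The SWIGLU_OAI activation wired up in the same patch also reduces to a short closed form: clamp x from above and g to [-limit, limit], gate x with a sigmoid at slope alpha, then scale by (1 + g). A scalar sketch mirroring ggml_sycl_op_swiglu_oai_single:

    // Scalar SWIGLU_OAI reference (same formula as the SYCL helper above).
    #include <algorithm>
    #include <cmath>

    float swiglu_oai_ref(float x, float g, float alpha = 1.702f, float limit = 7.0f) {
        x = std::min(x, limit);                   // x clamped from above only
        g = std::max(std::min(g, limit), -limit); // g clamped to [-limit, limit]
        const float out_glu = x / (1.0f + std::exp(-alpha * x));
        return out_glu * (1.0f + g);
    }
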
If you encountered the issue while using an external UI (e.g. ollama), please reproduce your issue using one of the examples/binaries in this repository. - The `llama-cli` binary can be used for simple and reproducible model inference. + The `llama-completion` binary can be used for simple and reproducible model inference. - type: textarea id: version attributes: @@ -74,9 +74,12 @@ body: Please give us a summary of the problem and tell us how to reproduce it. If you can narrow down the bug to specific hardware, compile flags, or command line arguments, that information would be very much appreciated by us. + + If possible, please try to reproduce the issue using `llama-completion` with `-fit off`. + If you can only reproduce the issue with `-fit on`, please provide logs both with and without `--verbose`. placeholder: > - e.g. when I run llama-cli with -ngl 99 I get garbled outputs. - When I use -ngl 0 it works correctly. + e.g. when I run llama-completion with `-fa on` I get garbled outputs for very long prompts. + With short prompts or `-fa off` it works correctly. Here are the exact commands that I used: ... validations: required: true diff --git a/ci/run.sh b/ci/run.sh index 0676504b3e..0a4a0e41eb 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -398,6 +398,8 @@ function gg_run_qwen3_0_6b { ./bin/llama-quantize ${model_bf16} ${model_q5_k} q5_k $(nproc) ./bin/llama-quantize ${model_bf16} ${model_q6_k} q6_k $(nproc) + (time ./bin/llama-fit-params --model ${model_f16} 2>&1 | tee -a $OUT/${ci}-fp-f16.log) + (time ./bin/llama-completion -no-cnv --model ${model_f16} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log (time ./bin/llama-completion -no-cnv --model ${model_bf16} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log (time ./bin/llama-completion -no-cnv --model ${model_q8_0} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log @@ -523,6 +525,8 @@ function gg_run_embd_bge_small { ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 + (time ./bin/llama-fit-params --model ${model_f16} 2>&1 | tee -a $OUT/${ci}-fp-f16.log) + (time ./bin/llama-embedding --model ${model_f16} -p "I believe the meaning of life is" -ngl 99 -c 0 --no-op-offload) 2>&1 | tee -a $OUT/${ci}-tg-f16.log (time ./bin/llama-embedding --model ${model_q8_0} -p "I believe the meaning of life is" -ngl 99 -c 0 --no-op-offload) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log @@ -563,6 +567,8 @@ function gg_run_rerank_tiny { model_f16="${path_models}/ggml-model-f16.gguf" + (time ./bin/llama-fit-params --model ${model_f16} 2>&1 | tee -a $OUT/${ci}-fp-f16.log) + # for this model, the SEP token is "" (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?\thi\nwhat is panda?\tit's a bear\nwhat is panda?\tThe giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." 
-ngl 99 -c 0 --pooling rank --embd-normalize -1 --no-op-offload --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log diff --git a/common/arg.cpp b/common/arg.cpp index aaa7b92a2e..acf4c8f8a8 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -529,7 +530,9 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context params.kv_overrides.back().key[0] = 0; } - if (!params.tensor_buft_overrides.empty()) { + // pad tensor_buft_overrides for llama_params_fit: + const size_t ntbo = llama_max_tensor_buft_overrides(); + while (params.tensor_buft_overrides.size() < ntbo) { params.tensor_buft_overrides.push_back({nullptr, nullptr}); } @@ -2153,6 +2156,34 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } } ).set_env("LLAMA_ARG_MAIN_GPU")); + add_opt(common_arg( + { "-fit", "--fit" }, "[on|off]", + string_format("whether to adjust unset arguments to fit in device memory ('on' or 'off', default: '%s')", params.fit_params ? "on" : "off"), + [](common_params & params, const std::string & value) { + if (is_truthy(value)) { + params.fit_params = true; + } else if (is_falsey(value)) { + params.fit_params = false; + } else { + throw std::runtime_error( + string_format("error: unkown value for --fit: '%s'\n", value.c_str())); + } + } + ).set_env("LLAMA_ARG_FIT")); + add_opt(common_arg( + { "-fitt", "--fit-target" }, "MiB", + string_format("target margin per device for --fit option, default: %zu", params.fit_params_target/(1024*1024)), + [](common_params & params, int value) { + params.fit_params_target = value * size_t(1024*1024); + } + ).set_env("LLAMA_ARG_FIT_TARGET")); + add_opt(common_arg( + { "-fitc", "--fit-ctx" }, "N", + string_format("minimum ctx size that can be set by --fit option, default: %" PRIu32, params.fit_params_min_ctx), + [](common_params & params, int value) { + params.fit_params_min_ctx = value; + } + ).set_env("LLAMA_ARG_FIT_CTX")); add_opt(common_arg( {"--check-tensors"}, string_format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"), diff --git a/common/common.cpp b/common/common.cpp index b76dfa10ea..5a8cf52485 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1088,7 +1088,15 @@ struct common_init_result::impl { common_init_result::common_init_result(common_params & params) : pimpl(new impl{}) { - const auto mparams = common_model_params_to_llama(params); + auto mparams = common_model_params_to_llama(params); + auto cparams = common_context_params_to_llama(params); + + if (params.fit_params) { + LOG_INF("%s: fitting params to device memory, to report bugs during this step use -fit off (or --verbose if you can't)\n", __func__); + llama_params_fit(params.model.path.c_str(), &mparams, &cparams, + params.tensor_split, params.tensor_buft_overrides.data(), params.fit_params_target, params.fit_params_min_ctx, + params.verbosity >= 4 ? 
GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_ERROR); + } llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); if (model == NULL) { @@ -1103,8 +1111,6 @@ common_init_result::common_init_result(common_params & params) : // TODO: fix naming common_init_sampler_from_model(model, params.sampling); - auto cparams = common_context_params_to_llama(params); - if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) { LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__); params.sampling.ignore_eos = false; @@ -1143,8 +1149,7 @@ common_init_result::common_init_result(common_params & params) : llama_context * lctx = llama_init_from_model(model, cparams); if (lctx == NULL) { - LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", - __func__, params.model.path.c_str()); + LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str()); return; } @@ -1176,15 +1181,13 @@ common_init_result_ptr common_init_from_params(common_params & params) { llama_model * model = res->model(); if (model == NULL) { - LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", - __func__, params.model.path.c_str()); + LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str()); return res; } llama_context * lctx = res->context(); if (lctx == NULL) { - LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", - __func__, params.model.path.c_str()); + LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str()); return res; } diff --git a/common/common.h b/common/common.h index 4edb74b706..d70744840f 100644 --- a/common/common.h +++ b/common/common.h @@ -99,6 +99,7 @@ enum llama_example { LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_DIFFUSION, LLAMA_EXAMPLE_FINETUNE, + LLAMA_EXAMPLE_FIT_PARAMS, LLAMA_EXAMPLE_COUNT, }; @@ -306,8 +307,8 @@ struct lr_opt { struct ggml_opt_optimizer_params common_opt_lr_pars(void * userdata); struct common_params { - int32_t n_predict = -1; // new tokens to predict - int32_t n_ctx = 4096; // context size + int32_t n_predict = -1; // max. 
number of new tokens to predict, -1 == no limit
+    int32_t n_ctx                  =     0; // context size, 0 == context the model was trained with
     int32_t n_batch                =  2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_ubatch               =   512; // physical batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep                 =     0; // number of tokens to keep from initial prompt
@@ -328,9 +329,12 @@ struct common_params {
 
     // offload params
     std::vector<ggml_backend_dev_t> devices; // devices to use for offloading
 
-    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
-    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
-    float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs
+    int32_t n_gpu_layers = -1;      // number of layers to store in VRAM (-1 - use default)
+    int32_t main_gpu = 0;           // the GPU that is used for scratch and small tensors
+    float tensor_split[128] = {0};  // how split tensors should be distributed across GPUs
+    bool fit_params = true;         // whether to fit unset model/context parameters to free device memory
+    size_t fit_params_target = 1024 * 1024*1024; // margin per device in bytes for fitting parameters to free memory
+    int32_t fit_params_min_ctx = 4096; // minimum context size to set when trying to reduce memory use
 
     enum llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs
diff --git a/ggml/include/ggml-alloc.h b/ggml/include/ggml-alloc.h
index 2cb150fd2a..78aa059dde 100644
--- a/ggml/include/ggml-alloc.h
+++ b/ggml/include/ggml-alloc.h
@@ -53,7 +53,14 @@ GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc);
 // call with a worst-case graph to avoid buffer reallocations
 // not strictly required for single buffer usage: ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
 // returns false if the buffer allocation failed
+// ggml_gallocr_reserve_n_size writes the buffer sizes per galloc buffer that would be allocated by ggml_gallocr_reserve_n to sizes
 GGML_API bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph);
+GGML_API void ggml_gallocr_reserve_n_size(
+    ggml_gallocr_t galloc,
+    struct ggml_cgraph * graph,
+    const int * node_buffer_ids,
+    const int * leaf_buffer_ids,
+    size_t * sizes);
 GGML_API bool ggml_gallocr_reserve_n(
     ggml_gallocr_t galloc,
     struct ggml_cgraph * graph,
@@ -68,6 +75,8 @@ GGML_API size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_i
 // Utils
 // Create a buffer and allocate all the tensors in a ggml_context
+// ggml_backend_alloc_ctx_tensors_from_buft_size returns the size of the buffer that would be allocated by ggml_backend_alloc_ctx_tensors_from_buft
+GGML_API size_t ggml_backend_alloc_ctx_tensors_from_buft_size(struct ggml_context * ctx, ggml_backend_buffer_type_t buft);
 GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft);
 GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend);
diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h
index f1b7407859..4ed5f35774 100644
--- a/ggml/include/ggml-backend.h
+++ b/ggml/include/ggml-backend.h
@@ -307,6 +307,7 @@ extern "C" {
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
 
     // Initialize backend buffers from a measure graph
+    GGML_API void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes);
     GGML_API
bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); // returns success GGML_API int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched); diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 686da3dbd1..20c912d0e9 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -2615,7 +2615,8 @@ extern "C" { // Set callback for all future logging events. // If this is not called, or NULL is supplied, everything is output on stderr. - GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data); + GGML_API void ggml_log_get(ggml_log_callback * log_callback, void ** user_data); + GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data); GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); diff --git a/ggml/src/ggml-alloc.c b/ggml/src/ggml-alloc.c index ec16cbda9f..41419b617b 100644 --- a/ggml/src/ggml-alloc.c +++ b/ggml/src/ggml-alloc.c @@ -594,7 +594,9 @@ static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) { } static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) { - return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated; + return t->data != NULL // tensor data already set externally + || t->buffer // tensor on external buffer (but not yet allocated) + || ggml_gallocr_is_own(galloc, t); // tensor will be allocated by galloc } // free the extra space at the end if the new tensor is smaller @@ -823,7 +825,8 @@ static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgr } } -bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { +static bool ggml_gallocr_reserve_n_impl( + ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids, bool no_alloc) { size_t min_hash_size = graph->n_nodes + graph->n_leafs; // add 25% margin to avoid hash collisions min_hash_size += min_hash_size / 4; @@ -928,16 +931,19 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c size_t cur_size = galloc->buffers[i] ? 
ggml_vbuffer_size(galloc->buffers[i]) : 0; if (cur_size > 0) { GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", - __func__, ggml_backend_buft_name(galloc->bufts[i]), - cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); + __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); } } #endif ggml_vbuffer_free(galloc->buffers[i]); - galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE); - if (galloc->buffers[i] == NULL) { - GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size); - return false; + if (no_alloc) { + galloc->buffers[i] = NULL; + } else { + galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE); + if (galloc->buffers[i] == NULL) { + GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size); + return false; + } } } } @@ -945,6 +951,21 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c return true; } +void ggml_gallocr_reserve_n_size( + ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids, size_t * sizes) { + GGML_ASSERT(ggml_gallocr_reserve_n_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids, /*no_alloc =*/ true)); + for (int i = 0; i < galloc->n_buffers; i++) { + sizes[i] = 0; + for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) { + sizes[i] += galloc->buf_tallocs[i]->chunks[c]->max_size; + } + } +} + +bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { + return ggml_gallocr_reserve_n_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids, /*no_alloc =*/ false); +} + bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) { return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL); } @@ -1147,7 +1168,8 @@ static bool alloc_tensor_range(struct ggml_context * ctx, return true; } -ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { +static ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft_impl( + struct ggml_context * ctx, ggml_backend_buffer_type_t buft, size_t * nbytes_total, bool no_alloc) { GGML_ASSERT(ggml_get_no_alloc(ctx) == true); size_t alignment = ggml_backend_buft_get_alignment(buft); @@ -1155,6 +1177,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte ggml_backend_buffer_t * buffers = NULL; size_t n_buffers = 0; + *nbytes_total = 0; size_t cur_buf_size = 0; struct ggml_tensor * first = ggml_get_first_tensor(ctx); @@ -1166,10 +1189,11 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) { // allocate tensors in the current buffer - if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) { + if (!no_alloc && !alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) { return NULL; } first = t; + *nbytes_total += cur_buf_size; cur_buf_size = this_size; } else { cur_buf_size += this_size; @@ -1178,15 +1202,21 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte // allocate remaining tensors if (cur_buf_size > 0) { - if (!alloc_tensor_range(ctx, 
first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) { + *nbytes_total += cur_buf_size; + if (!no_alloc && !alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) { return NULL; } } + if (no_alloc) { + return NULL; + } + if (n_buffers == 0) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__); #endif + GGML_ASSERT(!buffers); return NULL; } @@ -1196,10 +1226,24 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte } else { buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers); } - free(buffers); + if (buffers) { + free(buffers); // can be NULL if context is empty or no_alloc + } return buffer; } +size_t ggml_backend_alloc_ctx_tensors_from_buft_size(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { + size_t nbytes_total = 0; + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft_impl(ctx, buft, &nbytes_total, /*no_alloc=*/ true); + GGML_ASSERT(!buf); + return nbytes_total; +} + +ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { + size_t nbytes_total = 0; + return ggml_backend_alloc_ctx_tensors_from_buft_impl(ctx, buft, &nbytes_total, /*no_alloc =*/ false); +} + ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) { return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend)); } diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index 08681f35e3..8547ecc849 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -36,12 +36,11 @@ const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) { } ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + GGML_ASSERT(buft); if (size == 0) { // return a dummy buffer for zero-sized allocations return ggml_backend_buffer_init(buft, {}, NULL, 0); } - - GGML_ASSERT(buft); return buft->iface.alloc_buffer(buft, size); } @@ -128,6 +127,12 @@ void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { return NULL; } + // FIXME JG: a multi_buffer has a non-zero size, according to the above comment get_base is not optional, + // I don't know whether the above comment is correct + if (!buffer->iface.get_base) { + return NULL; + } + void * base = buffer->iface.get_base(buffer); GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL"); @@ -1727,6 +1732,20 @@ void ggml_backend_sched_reset(ggml_backend_sched_t sched) { sched->is_alloc = false; } +void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes) { + GGML_ASSERT(sched); + GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); + GGML_ASSERT(sizes); + + ggml_backend_sched_reset(sched); + + ggml_backend_sched_synchronize(sched); + + ggml_backend_sched_split_graph(sched, measure_graph); + + ggml_gallocr_reserve_n_size(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids, sizes); +} + bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { GGML_ASSERT(sched); GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index f0913cd359..eb3ae72eaa 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -7566,6 +7566,11 @@ size_t ggml_quantize_chunk( 
//////////////////////////////////////////////////////////////////////////////// +void ggml_log_get(ggml_log_callback * log_callback, void ** user_data) { + *log_callback = g_logger_state.log_callback; + *user_data = g_logger_state.log_callback_user_data; +} + void ggml_log_set(ggml_log_callback log_callback, void * user_data) { g_logger_state.log_callback = log_callback ? log_callback : ggml_log_callback_default; g_logger_state.log_callback_user_data = user_data; diff --git a/include/llama.h b/include/llama.h index b52eaacfa7..f862930099 100644 --- a/include/llama.h +++ b/include/llama.h @@ -313,6 +313,7 @@ extern "C" { bool check_tensors; // validate model tensor data bool use_extra_bufts; // use extra buffer types (used for weight repacking) bool no_host; // bypass host buffer allowing extra buffers to be used + bool no_alloc; // only load metadata and simulate memory allocations }; // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations @@ -466,10 +467,24 @@ extern "C" { // Frees all allocated memory LLAMA_API void llama_free(struct llama_context * ctx); + // fits mparams and cparams to free device memory (assumes system memory is unlimited) + // returns true if the parameters could be successfully modified to fit device memory + // this function is NOT thread safe because it modifies the global llama logger state + LLAMA_API bool llama_params_fit( + const char * path_model, + struct llama_model_params * mparams, + struct llama_context_params * cparams, + float * tensor_split, // writable buffer for tensor split, needs at least llama_max_devices elements + struct llama_model_tensor_buft_override * tensor_buft_overrides, // writable buffer for overrides, needs at least llama_max_tensor_buft_overrides elements + size_t margin, // margin of memory to leave per device in bytes + uint32_t n_ctx_min, // minimum context size to set when trying to reduce memory use + enum ggml_log_level log_level); // minimum log level to print during fitting, lower levels go to debug log + LLAMA_API int64_t llama_time_us(void); LLAMA_API size_t llama_max_devices(void); LLAMA_API size_t llama_max_parallel_sequences(void); + LLAMA_API size_t llama_max_tensor_buft_overrides(void); LLAMA_API bool llama_supports_mmap (void); LLAMA_API bool llama_supports_mlock (void); @@ -1354,7 +1369,9 @@ extern "C" { // Set callback for all future logging events. // If this is not called, or NULL is supplied, everything is output on stderr. - LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data); + // The logger state is global so these functions are NOT thread safe. 
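+    // A typical use of the pair (sketch): save the current logger with llama_log_get,
+    // install a temporary callback with llama_log_set, and restore the saved callback
+    // afterwards; llama_params_fit uses this pattern internally.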
+ LLAMA_API void llama_log_get(ggml_log_callback * log_callback, void ** user_data); + LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data); // // Performance utils diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 2a17e44ecd..8786d4ee3e 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -258,6 +258,7 @@ llama_context::llama_context( backend_buft.clear(); backend_ptrs.clear(); + backend_buf_exp_size.clear(); for (auto & backend : backends) { auto * buft = ggml_backend_get_default_buffer_type(backend.get()); @@ -274,6 +275,7 @@ llama_context::llama_context( backend_buft.push_back(buft); backend_ptrs.push_back(backend.get()); + backend_buf_exp_size.push_back(0); } LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size()); @@ -389,7 +391,8 @@ llama_context::llama_context( // reserve pp (prompt processing) graph first so that buffers are only allocated once { - auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get(), + model.hparams.no_alloc, model.hparams.no_alloc ? backend_buf_exp_size.data() : nullptr); if (!gf) { if (pipeline_parallel) { LLAMA_LOG_WARN("%s: compute buffer allocation failed, retrying without pipeline parallelism\n", __func__); @@ -407,7 +410,7 @@ llama_context::llama_context( // reserve with tg (token generation) graph to get the number of splits and nodes { - auto * gf = graph_reserve(n_seqs, n_seqs, n_seqs, mctx.get()); + auto * gf = graph_reserve(n_seqs, n_seqs, n_seqs, mctx.get(), model.hparams.no_alloc); if (!gf) { throw std::runtime_error("failed to allocate compute tg buffers"); } @@ -422,7 +425,7 @@ llama_context::llama_context( // // auto * gf = graph_reserve(n_tokens, 1, n_tokens, mctx.get()); // - auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get(), model.hparams.no_alloc); if (!gf) { throw std::runtime_error("failed to allocate compute pp buffers"); } @@ -431,11 +434,13 @@ llama_context::llama_context( for (size_t i = 0; i < backend_ptrs.size(); ++i) { ggml_backend_t backend = backend_ptrs[i]; ggml_backend_buffer_type_t buft = backend_buft[i]; - size_t size = ggml_backend_sched_get_buffer_size(sched.get(), backend); - if (size > 1) { + if (!model.hparams.no_alloc) { + backend_buf_exp_size[i] = ggml_backend_sched_get_buffer_size(sched.get(), backend); + } + if (backend_buf_exp_size[i] > 1) { LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__, ggml_backend_buft_name(buft), - size / 1024.0 / 1024.0); + backend_buf_exp_size[i] / 1024.0 / 1024.0); } } @@ -454,6 +459,23 @@ llama_context::llama_context( } llama_context::~llama_context() { + // FIXME this currently results in a use-after-free bug if the model is freed before the context + // if (!model.hparams.no_alloc) { + // for (size_t i = 0; i < backend_ptrs.size(); ++i) { + // ggml_backend_t backend = backend_ptrs[i]; + // ggml_backend_buffer_type_t buft = backend_buft[i]; + + // const size_t size_exp = backend_buf_exp_size[i]; + // const size_t size_act = ggml_backend_sched_get_buffer_size(sched.get(), backend); + // if (size_exp == size_act) { + // LLAMA_LOG_DEBUG("%s: %10s compute buffer size is %8.4f MiB, matches expectation of %8.4f MiB\n", + // __func__, ggml_backend_buft_name(buft), size_act / (1024.0*1024.0), size_exp / (1024.0*1024.0)); + // } else { + // LLAMA_LOG_WARN("%s: %10s compute buffer size of %8.4f MiB, does not match expectation of %8.4f 
MiB\n", + // __func__, ggml_backend_buft_name(buft), size_act / (1024.0*1024.0), size_exp / (1024.0*1024.0)); + // } + // } + // } ggml_opt_free(opt_ctx); } @@ -1428,7 +1450,8 @@ llm_graph_result * llama_context::get_gf_res_reserve() const { return static_cast(gf_res_reserve.get()); } -ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only) { +ggml_cgraph * llama_context::graph_reserve( + uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only, size_t * sizes) { LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs); GGML_ASSERT(n_outputs >= 1); @@ -1465,8 +1488,13 @@ ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, u // initialize scheduler with the specified graph if (split_only) { - ggml_backend_sched_split_graph(sched.get(), gf); + if (sizes) { + ggml_backend_sched_reserve_size(sched.get(), gf, sizes); + } else { + ggml_backend_sched_split_graph(sched.get(), gf); + } } else if (!ggml_backend_sched_reserve(sched.get(), gf)) { + GGML_ASSERT(!sizes); LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); return nullptr; } @@ -2088,15 +2116,26 @@ void llama_context::perf_reset() { std::map llama_context::memory_breakdown() const { std::map ret; - for (const auto & buft_size : model.memory_breakdown()) { - ret[buft_size.first].model += buft_size.second; + for (const auto & [buft, size] : model.memory_breakdown()) { + ret[buft].model += size; } - for (const auto & buft_size : memory->memory_breakdown()) { - ret[buft_size.first].context += buft_size.second; + if (memory) { + for (const auto & [buft, size] : memory->memory_breakdown()) { + ret[buft].context += size; + } } - for (const auto & backend_ptr : backends) { - ggml_backend_t backend = backend_ptr.get(); - ret[ggml_backend_sched_get_buffer_type(sched.get(), backend)].compute += ggml_backend_sched_get_buffer_size(sched.get(), backend); + if (model.hparams.no_alloc) { + for (size_t i = 0; i < backends.size(); ++i) { + ggml_backend_t backend = backends[i].get(); + ggml_backend_buffer_type_t buft = ggml_backend_sched_get_buffer_type(sched.get(), backend); + ret[buft].compute += backend_buf_exp_size[i]; + } + } else { + for (const auto & backend_ptr : backends) { + ggml_backend_t backend = backend_ptr.get(); + ggml_backend_buffer_type_t buft = ggml_backend_sched_get_buffer_type(sched.get(), backend); + ret[buft].compute += ggml_backend_sched_get_buffer_size(sched.get(), backend); + } } return ret; } diff --git a/src/llama-context.h b/src/llama-context.h index cd26eafe18..c31101330e 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -26,6 +26,10 @@ struct llama_memory_breakdown_data { size_t model = 0; // memory allocated for the model size_t context = 0; // memory allocated for the context size_t compute = 0; // memory allocated for temporary compute buffers + + size_t total() const { + return model + context + compute; + } }; struct llama_context { @@ -206,7 +210,8 @@ public: ggml_status graph_compute(ggml_cgraph * gf, bool batched); // reserve a graph with a dummy ubatch of the specified size - ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only = false); + ggml_cgraph * graph_reserve( + uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const 
llama_memory_context_i * mctx, bool split_only = false, size_t * sizes = nullptr); private: llm_graph_params graph_params( @@ -281,9 +286,10 @@ private: std::vector> set_n_threads_fns; - // buffer types used for the compute buffer of each backend + // pointers and buffer types used for the compute buffer of each backend std::vector backend_ptrs; std::vector backend_buft; + std::vector backend_buf_exp_size; // expected buffer sizes llm_graph_result_ptr gf_res_prev; llm_graph_result_ptr gf_res_reserve; diff --git a/src/llama-hparams.h b/src/llama-hparams.h index a467c64a14..cecb476e91 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -34,6 +34,7 @@ struct llama_hparams_convnext { struct llama_hparams { bool vocab_only; + bool no_alloc; bool rope_finetuned; bool use_par_res; bool swin_norm; diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp index c7a1880aad..8e3e7b223a 100644 --- a/src/llama-impl.cpp +++ b/src/llama-impl.cpp @@ -25,6 +25,10 @@ time_meas::~time_meas() { } } +void llama_log_get(ggml_log_callback * log_callback, void ** user_data) { + ggml_log_get(log_callback, user_data); +} + void llama_log_set(ggml_log_callback log_callback, void * user_data) { ggml_log_set(log_callback, user_data); g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default; diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp index bf3de2f2ef..4cbbca0925 100644 --- a/src/llama-kv-cache.cpp +++ b/src/llama-kv-cache.cpp @@ -175,7 +175,15 @@ llama_kv_cache::llama_kv_cache( // allocate tensors and initialize the buffers to avoid NaNs in the padding for (auto & [buft, ctx] : ctx_map) { - ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx.get(), buft); + ggml_backend_buffer_t buf; + if (model.hparams.no_alloc) { + buf = ggml_backend_buft_alloc_buffer(buft, /*size =*/ 0); // dummy buffer + for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != nullptr; t = ggml_get_next_tensor(ctx.get(), t)) { + t->buffer = buf; // set dummy buffer for KV cache so that the backend scheduler won't try to allocate it + } + } else { + buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx.get(), buft); // real buffer + } if (!buf) { throw std::runtime_error("failed to allocate buffer for kv cache"); } @@ -482,9 +490,18 @@ llama_pos llama_kv_cache::seq_pos_max(llama_seq_id seq_id) const { std::map llama_kv_cache::memory_breakdown() const { std::map ret; - for (const auto & [_, buf] : ctxs_bufs) { - ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get()); + for (const auto & [ctx, buf] : ctxs_bufs) { + ggml_backend_buffer_type_t buft = ggml_backend_buffer_get_type(buf.get()); + + if (hparams.no_alloc) { + GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) == nullptr); + ret[buft] += ggml_backend_alloc_ctx_tensors_from_buft_size(ctx.get(), buft); + } else { + // GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) != nullptr); // multi_buffer does not have a defined base + ret[buft] += ggml_backend_buffer_get_size(buf.get()); + } } + return ret; } diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp index aa3a65f87a..ca2ea2461d 100644 --- a/src/llama-model-loader.cpp +++ b/src/llama-model-loader.cpp @@ -473,6 +473,7 @@ llama_model_loader::llama_model_loader( std::vector & splits, bool use_mmap, bool check_tensors, + bool no_alloc, const llama_model_kv_override * param_overrides_p, const llama_model_tensor_buft_override * param_tensor_buft_overrides_p) { int trace = 0; @@ -716,6 +717,7 @@ 
llama_model_loader::llama_model_loader( this->use_mmap = use_mmap; this->check_tensors = check_tensors; + this->no_alloc = no_alloc; } std::string llama_model_loader::get_arch_name() const { diff --git a/src/llama-model-loader.h b/src/llama-model-loader.h index c9189f6cb4..0380c92fde 100644 --- a/src/llama-model-loader.h +++ b/src/llama-model-loader.h @@ -71,6 +71,7 @@ struct llama_model_loader { bool use_mmap = false; bool check_tensors; + bool no_alloc; llama_files files; llama_ftype ftype; @@ -97,6 +98,7 @@ struct llama_model_loader { std::vector & splits, // optional, only need if the split does not follow naming scheme bool use_mmap, bool check_tensors, + bool no_alloc, const llama_model_kv_override * param_overrides_p, const llama_model_tensor_buft_override * param_tensor_buft_overrides_p); diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 28f06b4e61..e4d2138056 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -6606,9 +6606,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) { std::vector bufs; if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) { + GGML_ASSERT(!ml.no_alloc); for (uint32_t idx = 0; idx < ml.files.size(); idx++) { // only the mmap region containing the tensors in the model is mapped to the backend buffer - // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers + // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, + // then we could just use metal for all layers // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size void * addr = nullptr; size_t first, last; // NOLINT @@ -6624,9 +6626,16 @@ bool llama_model::load_tensors(llama_model_loader & ml) { bufs.emplace_back(buf); buf_map.emplace(idx, buf); } - } - else { - ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + } else { + ggml_backend_buffer_t buf; + if (ml.no_alloc) { + buf = ggml_backend_buft_alloc_buffer(buft, /*size =*/ 0); // dummy buffer + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { + t->buffer = buf; // set dummy buffer for weights so that the backend scheduler won't try to allocate them + } + } else { + buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); // real buffer + } if (buf == nullptr) { throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft))); } @@ -6681,6 +6690,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } } + if (ml.no_alloc) { + return true; + } + // load tensor data for (auto & [ctx, buf_map] : ctx_buf_maps) { if (!ml.load_all_data(ctx, buf_map, use_mlock ? 
&pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) { @@ -6723,9 +6736,18 @@ size_t llama_model::n_devices() const { std::map llama_model::memory_breakdown() const { std::map ret; - for (const auto & [_, bufs] : pimpl->ctxs_bufs) { - for (const auto & buf : bufs) { - ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get()); + for (const auto & [ctx, bufs] : pimpl->ctxs_bufs) { + if (hparams.no_alloc) { + GGML_ASSERT(bufs.size() == 1); + ggml_backend_buffer_t buf = bufs[0].get(); + GGML_ASSERT(ggml_backend_buffer_get_base(buf) == nullptr); + ggml_backend_buffer_type_t buft = ggml_backend_buffer_get_type(buf); + ret[buft] += ggml_backend_alloc_ctx_tensors_from_buft_size(ctx.get(), buft); + } else { + for (const auto & buf : bufs) { + // GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) != nullptr); // multi_buffer does not have a defined base + ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get()); + } } } return ret; @@ -6770,6 +6792,7 @@ void llama_model::print_info() const { // hparams LLAMA_LOG_INFO("%s: arch = %s\n", __func__, arch_name().c_str()); LLAMA_LOG_INFO("%s: vocab_only = %d\n", __func__, hparams.vocab_only); + LLAMA_LOG_INFO("%s: no_alloc = %d\n", __func__, hparams.no_alloc); if (!hparams.vocab_only) { LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train); @@ -7618,6 +7641,7 @@ llama_model_params llama_model_default_params() { /*.check_tensors =*/ false, /*.use_extra_bufts =*/ true, /*.no_host =*/ false, + /*.no_alloc =*/ false, }; return result; diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 351dcb7baa..bc4b05c3b5 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -596,7 +596,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } std::vector splits = {}; - llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides, nullptr); + llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, /*no_alloc*/ false, kv_overrides, nullptr); ml.init_mappings(false); // no prefetching llama_model model(llama_model_default_params()); diff --git a/src/llama.cpp b/src/llama.cpp index ab2e9868af..7ed34b80ae 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -1,6 +1,9 @@ +#include "llama.h" + #include "llama-impl.h" #include "llama-chat.h" +#include "llama-context.h" #include "llama-mmap.h" #include "llama-vocab.h" #include "llama-model-loader.h" @@ -11,11 +14,14 @@ #include "ggml-backend.h" #include +#include +#include #include #include #include #include #include +#include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data @@ -37,6 +43,643 @@ const char * llama_flash_attn_type_name(enum llama_flash_attn_type flash_attn_ty GGML_ABORT("fatal error"); } +struct llama_device_memory_data { + int64_t total; + int64_t free; + llama_memory_breakdown_data mb; +}; + +static std::vector llama_get_device_memory_data( + const char * path_model, const llama_model_params * mparams, const llama_context_params * cparams, + std::vector & devs, uint32_t & hp_ngl, uint32_t & hp_n_ctx_train, uint32_t & hp_n_expert, + const ggml_log_level log_level) { + struct user_data_t { + struct { + ggml_log_callback callback; + void * user_data; + } original_logger; + ggml_log_level min_level; // prints below this log level go to debug log + }; + user_data_t ud; + llama_log_get(&ud.original_logger.callback, &ud.original_logger.user_data); + ud.min_level = log_level; + + 
llama_log_set([](ggml_log_level level, const char * text, void * user_data) { + const user_data_t * ud = (const user_data_t *) user_data; + const ggml_log_level level_eff = level >= ud->min_level ? level : GGML_LOG_LEVEL_DEBUG; + ud->original_logger.callback(level_eff, text, ud->original_logger.user_data); + }, &ud); + + llama_model_params mparams_copy = *mparams; + mparams_copy.no_alloc = true; + mparams_copy.use_mmap = false; + + llama_model * model = llama_model_load_from_file(path_model, mparams_copy); + if (model == nullptr) { + llama_log_set(ud.original_logger.callback, ud.original_logger.user_data); + throw std::runtime_error("failed to load model"); + } + + llama_context * ctx = llama_init_from_model(model, *cparams); + if (ctx == nullptr) { + llama_model_free(model); + llama_log_set(ud.original_logger.callback, ud.original_logger.user_data); + throw std::runtime_error("failed to create llama_context from model"); + } + + std::vector ret(model->devices.size()); + + std::map memory_breakdown = ctx->memory_breakdown(); + + for (const auto & [buft, mb] : memory_breakdown) { + if (ggml_backend_buft_is_host(buft)) { + continue; + } + + ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft); + if (!dev) { + continue; + } + for (size_t i = 0; i < ret.size(); i++) { + if (model->devices[i] == dev) { + ret[i].mb.model += mb.model; + ret[i].mb.context += mb.context; + ret[i].mb.compute += mb.compute; + break; + } + } + } + for (size_t i = 0; i < ret.size(); i++) { + size_t free, total; + ggml_backend_dev_memory(model->devices[i], &free, &total); + ret[i].free = free; + ret[i].total = total; + } + + devs = model->devices; + hp_ngl = model->hparams.n_layer; + hp_n_ctx_train = model->hparams.n_ctx_train; + hp_n_expert = model->hparams.n_expert; + + llama_memory_breakdown_print(ctx); // goes to debug log + + llama_free(ctx); + llama_model_free(model); + llama_log_set(ud.original_logger.callback, ud.original_logger.user_data); + return ret; +} + +// enum to identify part of a layer for distributing its tensors: +enum layer_fraction_t { + LAYER_FRACTION_NONE = 0, // nothing + LAYER_FRACTION_ATTN = 1, // attention + LAYER_FRACTION_UP = 2, // attention + up + LAYER_FRACTION_GATE = 3, // attention + up + gate + LAYER_FRACTION_MOE = 4, // everything but sparse MoE weights +}; +// this enum is only used in llama_params_fit_impl but needs to be defined outside of it to fix a Windows compilation issue + +static void llama_params_fit_impl( + const char * path_model, struct llama_model_params * mparams, struct llama_context_params * cparams, + float * tensor_split, struct llama_model_tensor_buft_override * tensor_buft_overrides, + size_t margin_s, uint32_t n_ctx_min, enum ggml_log_level log_level) { + constexpr int64_t MiB = 1024*1024; + const int64_t margin = margin_s; // this function uses int64_t rather than size_t for memory sizes to more conveniently handle deficits + typedef std::vector dmds_t; + const llama_model_params default_mparams = llama_model_default_params(); + + std::vector devs; + uint32_t hp_ngl = 0; // hparams.n_gpu_layers + uint32_t hp_nct = 0; // hparams.n_ctx_train + uint32_t hp_nex = 0; // hparams.n_expert + + // step 1: get data for default parameters and check whether any changes are necessary in the first place + + LLAMA_LOG_DEBUG("%s: getting device memory data for initial parameters:\n", __func__); + const dmds_t dmds_full = llama_get_device_memory_data(path_model, mparams, cparams, devs, hp_ngl, hp_nct, hp_nex, log_level); + const size_t nd = devs.size(); // number of 
devices + if (nd == 0) { + LLAMA_LOG_INFO("%s: no devices with dedicated memory found\n", __func__); + return; + } + + std::vector dev_names; + { + dev_names.reserve(nd); + size_t max_length = 0; + for (ggml_backend_dev_t dev : devs) { + std::string name = ggml_backend_dev_name(dev); + name += " ("; + name += ggml_backend_dev_description(dev); + name += ")"; + dev_names.push_back(name); + max_length = std::max(max_length, name.length()); + } + for (std::string & dn : dev_names) { + dn.insert(dn.end(), max_length - dn.length(), ' '); + } + } + + int64_t sum_total = 0; + int64_t sum_projected_free = 0; + int64_t min_projected_free = INT64_MAX; + int64_t sum_projected_used = 0; + int64_t sum_projected_ctx = 0; + + if (nd > 1) { + LLAMA_LOG_INFO("%s: projected memory use with initial parameters [MiB]:\n", __func__); + } + for (size_t id = 0; id < nd; id++) { + const llama_device_memory_data & dmd = dmds_full[id]; + + const int64_t projected_used = dmd.mb.total(); + const int64_t projected_free = dmd.free - projected_used; + + sum_total += dmd.total; + sum_projected_used += projected_used; + sum_projected_free += projected_free; + min_projected_free = std::min(min_projected_free, projected_free); + sum_projected_ctx += dmd.mb.context; + + if (nd > 1) { + LLAMA_LOG_INFO("%s: - %s: %6" PRId64 " total, %6" PRId64 " used, %6" PRId64 " %s\n", + __func__, dev_names[id].c_str(), dmd.total/MiB, projected_used/MiB, std::abs(projected_free)/MiB, + projected_free >= 0 ? "surplus" : "deficit"); + } + } + assert(sum_total >= 0 && sum_projected_used >= 0 && sum_projected_ctx >= 0); + assert(sum_projected_used >= sum_projected_ctx); + LLAMA_LOG_INFO("%s: projected to use %" PRId64 " MiB of device memory vs. %" PRId64 " MiB of free device memory\n", + __func__, sum_projected_used/MiB, sum_total/MiB); + if (min_projected_free >= margin) { + if (nd == 1) { + LLAMA_LOG_INFO("%s: will leave %" PRId64 " >= %" PRId64 " MiB of free device memory, no changes needed\n", + __func__, min_projected_free/MiB, margin/MiB); + return; + } + LLAMA_LOG_INFO("%s: will leave at least %" PRId64 " >= %" PRId64 " MiB of free memory on all devices, no changes needed\n", + __func__, min_projected_free/MiB, margin/MiB); + return; + } + + // step 2: try reducing memory use by reducing the context size + + { + int64_t global_surplus = sum_projected_free - int64_t(nd)*margin; + if (global_surplus < 0) { + LLAMA_LOG_INFO(nd == 1 ? + "%s: cannot fulfill margin of %" PRId64 " MiB, need to reduce device memory by %" PRId64 " MiB\n" : + "%s: cannot fulfill margin of %" PRId64 " MiB on all devices, need to use %" PRId64 " MiB less in total\n", + __func__, margin/MiB, -global_surplus/MiB); + if (cparams->n_ctx == 0) { + if (hp_nct > n_ctx_min) { + const int64_t bytes_per_ctx = sum_projected_ctx / hp_nct; + const uint32_t ctx_reduction = std::min( + uint32_t((-global_surplus + bytes_per_ctx - 1) / bytes_per_ctx), hp_nct - n_ctx_min); + cparams->n_ctx = hp_nct - ctx_reduction; + const int64_t memory_reduction = ctx_reduction * bytes_per_ctx; + global_surplus += memory_reduction; + LLAMA_LOG_INFO("%s: context size reduced from %" PRIu32 " to %" PRIu32 " -> need %" PRId64 " MiB less memory in total\n", + __func__, hp_nct, cparams->n_ctx, memory_reduction/MiB); + } else { + LLAMA_LOG_INFO("%s: default model context size is %" PRIu32 " which is <= the min. 
context size of %" PRIu32 " -> no change\n", + __func__, hp_nct, n_ctx_min); + } + } else { + LLAMA_LOG_INFO("%s: context size set by user to %" PRIu32 " -> no change\n", __func__, cparams->n_ctx); + } + } + if (global_surplus >= 0) { + LLAMA_LOG_INFO("%s: entire model can be fit across devices by reducing context\n", __func__); + return; + } + } + + if (mparams->n_gpu_layers != default_mparams.n_gpu_layers) { + throw std::runtime_error("n_gpu_layers already set by user to " + std::to_string(mparams->n_gpu_layers) + ", abort"); + } + if (nd > 1) { + if (!tensor_split) { + throw std::runtime_error("did not provide a buffer to write the tensor_split to, abort"); + } + if (mparams->tensor_split) { + for (size_t id = 0; id < nd; id++) { + if (mparams->tensor_split[id] != 0.0f) { + throw std::runtime_error("model_params::tensor_split already set by user, abort"); + } + } + } + if (mparams->split_mode == LLAMA_SPLIT_MODE_ROW) { + throw std::runtime_error("changing weight allocation for LLAMA_SPLIT_MODE_ROW not implemented, abort"); + } + if (hp_ngl < 2*nd) { + throw std::runtime_error("model has only " + std::to_string(hp_ngl) + " layers but need at least " + + std::to_string(2*nd) + " to fit memory for " + std::to_string(nd) + " devices, abort"); + } + } + if (!tensor_buft_overrides) { + throw std::runtime_error("did not provide buffer to set tensor_buft_overrides, abort"); + } + if (mparams->tensor_buft_overrides && (mparams->tensor_buft_overrides->pattern || mparams->tensor_buft_overrides->buft)) { + throw std::runtime_error("model_params::tensor_buft_overrides already set by user, abort"); + } + + // step 3: iteratively fill the back to front with "dense" layers + // - for a dense model simply fill full layers, giving each device a contiguous slice of the model + // - for a MoE model, same as dense model but with all MoE tensors in system memory + + // utility function that returns a static C string matching the tensors for a specific layer index and layer fraction: + auto get_overflow_pattern = [&](const size_t il, const layer_fraction_t lf) -> const char * { + constexpr size_t n_strings = 1000; + if (il >= n_strings) { + throw std::runtime_error("at most " + std::to_string(n_strings) + " model layers are supported"); + } + switch (lf) { + case LAYER_FRACTION_ATTN: { + static std::array patterns; + if (patterns[il].empty()) { + patterns[il] = "blk\\." + std::to_string(il) + "\\.ffn_(up|gate|down).*"; + } + return patterns[il].c_str(); + } + case LAYER_FRACTION_UP: { + static std::array patterns; + if (patterns[il].empty()) { + patterns[il] = "blk\\." + std::to_string(il) + "\\.ffn_(gate|down).*"; + } + return patterns[il].c_str(); + } + case LAYER_FRACTION_GATE: { + static std::array patterns; + if (patterns[il].empty()) { + patterns[il] = "blk\\." + std::to_string(il) + "\\.ffn_down.*"; + } + return patterns[il].c_str(); + } + case LAYER_FRACTION_MOE: { + static std::array patterns; + if (patterns[il].empty()) { + patterns[il] = "blk\\." 
+ std::to_string(il) + "\\.ffn_(up|down|gate)_(ch|)exps";
+            }
+            return patterns[il].c_str();
+        }
+        default:
+            GGML_ABORT("fatal error");
+        }
+    };
+
+    struct ngl_t {
+        uint32_t n_layer = 0; // number of total layers
+        uint32_t n_part  = 0; // number of partial layers, <= n_layer
+
+        // for the first partial layer varying parts can overflow, all further layers use LAYER_FRACTION_MOE:
+        layer_fraction_t overflow_type = LAYER_FRACTION_MOE;
+    };
+
+    const size_t ntbo = llama_max_tensor_buft_overrides();
+
+    // utility function to set n_gpu_layers and tensor_split
+    auto set_ngl_tensor_split_tbo = [&](
+            const std::vector<ngl_t> & ngl_per_device,
+            const std::vector<ggml_backend_buffer_type_t> & overflow_bufts,
+            llama_model_params & mparams,
+            const bool add_nonrepeating) {
+        mparams.n_gpu_layers = 0;
+        for (size_t id = 0; id < nd; id++) {
+            mparams.n_gpu_layers += ngl_per_device[id].n_layer;
+            if (nd > 1) {
+                tensor_split[id] = ngl_per_device[id].n_layer;
+            }
+        }
+        assert(uint32_t(mparams.n_gpu_layers) <= hp_ngl);
+        uint32_t il0 = hp_ngl - mparams.n_gpu_layers; // start index for tensor buft overrides
+
+        if (add_nonrepeating) {
+            mparams.n_gpu_layers += 1;
+            tensor_split[nd - 1] += 1;
+        }
+        mparams.tensor_split = tensor_split;
+
+        size_t itbo = 0;
+        for (size_t id = 0; id < nd; id++) {
+            il0 += ngl_per_device[id].n_layer - ngl_per_device[id].n_part;
+            for (uint32_t il = il0; il < il0 + ngl_per_device[id].n_part; il++) {
+                if (itbo + 1 >= ntbo) {
+                    tensor_buft_overrides[itbo].pattern = nullptr;
+                    tensor_buft_overrides[itbo].buft    = nullptr;
+                    itbo++;
+                    mparams.tensor_buft_overrides = tensor_buft_overrides;
+                    throw std::runtime_error("llama_max_tensor_buft_overrides() == "
+                        + std::to_string(ntbo) + " is insufficient for model\n");
+                }
+                tensor_buft_overrides[itbo].pattern = get_overflow_pattern(il, il == il0 ?
ngl_per_device[id].overflow_type : LAYER_FRACTION_MOE); + tensor_buft_overrides[itbo].buft = overflow_bufts[id]; + itbo++; + } + il0 += ngl_per_device[id].n_part; + } + tensor_buft_overrides[itbo].pattern = nullptr; + tensor_buft_overrides[itbo].buft = nullptr; + itbo++; + mparams.tensor_buft_overrides = tensor_buft_overrides; + }; + + // utility function that returns the memory use per device for given numbers of layers per device + auto get_memory_for_layers = [&]( + const char * func_name, + const std::vector & ngl_per_device, + const std::vector & overflow_bufts, + const bool add_nonrepeating) -> std::vector { + llama_model_params mparams_copy = *mparams; + set_ngl_tensor_split_tbo(ngl_per_device, overflow_bufts, mparams_copy, add_nonrepeating); + + const dmds_t dmd_nl = llama_get_device_memory_data( + path_model, &mparams_copy, cparams, devs, hp_ngl, hp_nct, hp_nex, log_level); + + LLAMA_LOG_DEBUG("%s: memory for test allocation by device:\n", func_name); + for (size_t id = 0; id < nd; id++) { + const ngl_t & n = ngl_per_device[id]; + LLAMA_LOG_DEBUG( + "%s: id=%zu, n_layer=%2" PRIu32 ", n_part=%2" PRIu32 ", overflow_type=%d, mem=%6" PRId64 " MiB\n", + func_name, id, n.n_layer, n.n_part, int(n.overflow_type), dmd_nl[id].mb.total()/MiB); + } + + std::vector ret; + ret.reserve(nd); + for (const llama_device_memory_data & dmd : dmd_nl) { + ret.push_back(dmd.mb.total()); + } + return ret; + }; + + int64_t global_surplus_cpu_moe = 0; + if (hp_nex > 0) { + const static std::string pattern_moe_all = "blk\\.\\d+\\.ffn_(up|down|gate)_(ch|)exps"; // matches all MoE tensors + ggml_backend_buffer_type_t cpu_buft = ggml_backend_cpu_buffer_type(); + tensor_buft_overrides[0] = {pattern_moe_all.c_str(), cpu_buft}; + tensor_buft_overrides[1] = {nullptr, nullptr}; + mparams->tensor_buft_overrides = tensor_buft_overrides; + + LLAMA_LOG_DEBUG("%s: getting device memory data with all MoE tensors moved to system memory:\n", __func__); + const dmds_t dmds_cpu_moe = llama_get_device_memory_data( + path_model, mparams, cparams, devs, hp_ngl, hp_nct, hp_nex, log_level); + + for (const llama_device_memory_data & dmd : dmds_cpu_moe) { + global_surplus_cpu_moe += dmd.free; + global_surplus_cpu_moe -= int64_t(dmd.mb.total()) + margin; + } + + if (global_surplus_cpu_moe > 0) { + LLAMA_LOG_INFO("%s: with only dense weights in device memory there is a total surplus of %" PRId64 " MiB\n", + __func__, global_surplus_cpu_moe/MiB); + } else { + LLAMA_LOG_INFO("%s: with only dense weights in device memory there is still a total deficit of %" PRId64 " MiB\n", + __func__, -global_surplus_cpu_moe/MiB); + } + + // reset + tensor_buft_overrides[0] = {nullptr, nullptr}; + mparams->tensor_buft_overrides = tensor_buft_overrides; + } + + std::vector targets; // maximum acceptable memory use per device + targets.reserve(nd); + for (size_t id = 0; id < nd; id++) { + targets.push_back(dmds_full[id].free - margin); + LLAMA_LOG_DEBUG("%s: id=%zu, target=%" PRId64 " MiB\n", __func__, id, targets[id]/MiB); + } + + // whether for the optimal memory use we expect to load at least some MoE tensors: + const bool partial_moe = hp_nex > 0 && global_surplus_cpu_moe > 0; + + std::vector overflow_bufts; // which bufts the partial layers of a device overflow to: + overflow_bufts.reserve(nd); + for (size_t id = 0; id < nd - 1; ++id) { + overflow_bufts.push_back(ggml_backend_dev_buffer_type(devs[id + 1])); + } + overflow_bufts.push_back(ggml_backend_cpu_buffer_type()); + + std::vector ngl_per_device(nd); + std::vector mem = 
get_memory_for_layers(__func__, ngl_per_device, overflow_bufts, partial_moe); + if (hp_nex > 0) { + for (size_t id = 0; id < nd; id++) { + ngl_per_device[id].overflow_type = LAYER_FRACTION_MOE; + } + } + + // optimize the number of layers per device using the method of false position: + // - ngl_per_device has 0 layers for each device, lower bound + // - try a "high" configuration where a device is given all unassigned layers + // - interpolate the memory use / layer between low and high linearly to get a guess where it meets our target + // - check memory use of our guess, replace either the low or high bound + // - once we only have a difference of a single layer, stop and return the lower bound that just barely still fits + if (hp_nex == 0) { + LLAMA_LOG_INFO("%s: filling dense layers back-to-front:\n", __func__); + } else { + LLAMA_LOG_INFO("%s: filling dense-only layers back-to-front:\n", __func__); + } + uint32_t n_unassigned = hp_ngl; + for (int id = nd - 1; id >= 0; id--) { + std::vector ngl_per_device_high = ngl_per_device; + ngl_per_device_high[id].n_layer = n_unassigned; + if (hp_nex > 0) { + ngl_per_device_high[id].n_part = ngl_per_device_high[id].n_layer; + } + if (ngl_per_device_high[id].n_layer > 0) { + std::vector mem_high = get_memory_for_layers(__func__, ngl_per_device_high, overflow_bufts, partial_moe); + if (mem_high[id] > targets[id]) { + uint32_t delta = ngl_per_device_high[id].n_layer - ngl_per_device[id].n_layer; + while (delta > 1) { + uint32_t step_size = int64_t(delta) * (targets[id] - mem[id]) / (mem_high[id] - mem[id]); + step_size = std::max(step_size, uint32_t(1)); + step_size = std::min(step_size, delta - 1); + + std::vector ngl_per_device_test = ngl_per_device; + ngl_per_device_test[id].n_layer += step_size; + if (hp_nex) { + ngl_per_device_test[id].n_part += step_size; + } + const std::vector mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts, partial_moe); + + if (mem_test[id] <= targets[id]) { + ngl_per_device = ngl_per_device_test; + mem = mem_test; + n_unassigned -= ngl_per_device[id].n_layer; + LLAMA_LOG_DEBUG("%s: set ngl_per_device[%d].n_layer=%" PRIu32 "\n", __func__, id, ngl_per_device[id].n_layer); + } else { + ngl_per_device_high = ngl_per_device_test; + mem_high = mem_test; + LLAMA_LOG_DEBUG("%s: set ngl_per_device_high[%d].n_layer=%" PRIu32 "\n", __func__, id, ngl_per_device[id].n_layer); + } + delta = ngl_per_device_high[id].n_layer - ngl_per_device[id].n_layer; + } + } else { + ngl_per_device = ngl_per_device_high; + n_unassigned -= ngl_per_device[id].n_layer; + LLAMA_LOG_DEBUG("%s: set ngl_per_device[%d].n_layer=%" PRIu32 "\n", __func__, id, ngl_per_device[id].n_layer); + } + } + + const int64_t projected_margin = dmds_full[id].free - mem[id]; + LLAMA_LOG_INFO( + "%s: - %s: %2" PRIu32 " layers, %6" PRId64 " MiB used, %6" PRId64 " MiB free\n", + __func__, dev_names[id].c_str(), ngl_per_device[id].n_layer, mem[id]/MiB, projected_margin/MiB); + } + if (hp_nex == 0 || global_surplus_cpu_moe <= 0) { + set_ngl_tensor_split_tbo(ngl_per_device, overflow_bufts, *mparams, partial_moe); + return; + } + + // step 4: for a MoE model where all dense tensors fit, + // convert the dense-only layers in the back to full layers in the front until all devices are full + // essentially the same procedure as for the dense-only layers except front-to-back + // also, try fitting at least part of one more layer to reduce waste for "small" GPUs with e.g. 
24 GiB VRAM + + size_t id_dense_start = nd; + for (int id = nd - 1; id >= 0; id--) { + if (ngl_per_device[id].n_layer > 0) { + id_dense_start = id; + continue; + } + break; + } + assert(id_dense_start < nd); + + LLAMA_LOG_INFO("%s: converting dense-only layers to full layers and filling them front-to-back with overflow to next device/system memory:\n", __func__); + for (size_t id = 0; id <= id_dense_start; id++) { + std::vector ngl_per_device_high = ngl_per_device; + for (size_t jd = id_dense_start; jd < nd; jd++) { + const uint32_t n_layer_move = ngl_per_device_high[jd].n_layer; + ngl_per_device_high[id].n_layer += n_layer_move; + ngl_per_device_high[jd].n_layer -= n_layer_move; + ngl_per_device_high[jd].n_part = 0; + } + size_t id_dense_start_high = nd - 1; + std::vector mem_high = get_memory_for_layers(__func__, ngl_per_device_high, overflow_bufts, partial_moe); + + if (mem_high[id] > targets[id]) { + assert(ngl_per_device_high[id].n_layer >= ngl_per_device_high[id].n_part); + assert(ngl_per_device[id].n_layer >= ngl_per_device[id].n_part); + assert((ngl_per_device_high[id].n_layer - ngl_per_device_high[id].n_part) + >= ngl_per_device[id].n_layer - ngl_per_device[id].n_part); + uint32_t delta = (ngl_per_device_high[id].n_layer - ngl_per_device_high[id].n_part) + - (ngl_per_device[id].n_layer - ngl_per_device[id].n_part); + while (delta > 1) { + uint32_t step_size = int64_t(delta) * (targets[id] - mem[id]) / (mem_high[id] - mem[id]); + step_size = std::max(step_size, uint32_t(1)); + step_size = std::min(step_size, delta - 1); + + std::vector ngl_per_device_test = ngl_per_device; + size_t id_dense_start_test = id_dense_start; + uint32_t n_converted_test = 0; + for (;id_dense_start_test < nd; id_dense_start_test++) { + const uint32_t n_convert_jd = std::min(step_size - n_converted_test, ngl_per_device_test[id_dense_start_test].n_part); + ngl_per_device_test[id_dense_start_test].n_layer -= n_convert_jd; + ngl_per_device_test[id_dense_start_test].n_part -= n_convert_jd; + ngl_per_device_test[id].n_layer += n_convert_jd; + n_converted_test += n_convert_jd; + + if (ngl_per_device_test[id_dense_start_test].n_layer > 0) { + break; + } + } + const std::vector mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts, partial_moe); + + if (mem_test[id] <= targets[id]) { + ngl_per_device = ngl_per_device_test; + mem = mem_test; + id_dense_start = id_dense_start_test; + LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part)=(%" PRIu32 ", %" PRIu32 "), id_dense_start=%zu\n", + __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start); + } else { + ngl_per_device_high = ngl_per_device_test; + mem_high = mem_test; + id_dense_start_high = id_dense_start_test; + LLAMA_LOG_DEBUG("%s: set ngl_per_device_high[%zu].(n_layer, n_part)=(%" PRIu32 ", %" PRIu32 "), id_dense_start_high=%zu\n", + __func__, id, ngl_per_device_high[id].n_layer, ngl_per_device_high[id].n_part, id_dense_start_high); + } + delta = (ngl_per_device_high[id].n_layer - ngl_per_device_high[id].n_part) + - (ngl_per_device[id].n_layer - ngl_per_device[id].n_part); + } + } else { + ngl_per_device = ngl_per_device_high; + id_dense_start = id_dense_start_high; + LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part)=(%" PRIu32 ", %" PRIu32 "), id_dense_start=%zu\n", + __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start); + } + + // try to fit at least part of one more layer + if (ngl_per_device[id_dense_start].n_layer > 0) { + std::vector 
ngl_per_device_test = ngl_per_device; + size_t id_dense_start_test = id_dense_start; + ngl_per_device_test[id_dense_start_test].n_layer--; + ngl_per_device_test[id_dense_start_test].n_part--; + ngl_per_device_test[id].n_layer++; + ngl_per_device_test[id].n_part++; + if (ngl_per_device_test[id_dense_start_test].n_layer == 0) { + id_dense_start_test++; + } + ngl_per_device_test[id].overflow_type = LAYER_FRACTION_UP; + LLAMA_LOG_DEBUG("%s: trying to fit one extra layer with overflow_type=LAYER_FRACTION_UP\n", __func__); + std::vector mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts, partial_moe); + if (mem_test[id] < targets[id]) { + ngl_per_device = ngl_per_device_test; + mem = mem_test; + id_dense_start = id_dense_start_test; + LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part, overflow_type)=(%" PRIu32 ", %" PRIu32 ", UP), id_dense_start=%zu\n", + __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start); + + ngl_per_device_test[id].overflow_type = LAYER_FRACTION_GATE; + LLAMA_LOG_DEBUG("%s: trying to fit one extra layer with overflow_type=LAYER_FRACTION_GATE\n", __func__); + mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts, partial_moe); + if (mem_test[id] < targets[id]) { + ngl_per_device = ngl_per_device_test; + mem = mem_test; + id_dense_start = id_dense_start_test; + LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part, overflow_type)=(%" PRIu32 ", %" PRIu32 ", GATE), id_dense_start=%zu\n", + __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start); + } + } else { + ngl_per_device_test[id].overflow_type = LAYER_FRACTION_ATTN; + LLAMA_LOG_DEBUG("%s: trying to fit one extra layer with overflow_type=LAYER_FRACTION_ATTN\n", __func__); + mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts, partial_moe); + if (mem_test[id] < targets[id]) { + ngl_per_device = ngl_per_device_test; + mem = mem_test; + id_dense_start = id_dense_start_test; + LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part, overflow_type)=(%" PRIu32 ", %" PRIu32 ", ATTN), id_dense_start=%zu\n", + __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start); + } + } + } + + const int64_t projected_margin = dmds_full[id].free - mem[id]; + LLAMA_LOG_INFO( + "%s: - %s: %2" PRIu32 " layers (%2" PRIu32 " overflowing), %6" PRId64 " MiB used, %6" PRId64 " MiB free\n", + __func__, dev_names[id].c_str(), ngl_per_device[id].n_layer, ngl_per_device[id].n_part, mem[id]/MiB, projected_margin/MiB); + } + + set_ngl_tensor_split_tbo(ngl_per_device, overflow_bufts, *mparams, partial_moe); +} + +bool llama_params_fit( + const char * path_model, struct llama_model_params * mparams, struct llama_context_params * cparams, + float * tensor_split, struct llama_model_tensor_buft_override * tensor_buft_overrides, + size_t margin_s, uint32_t n_ctx_min, enum ggml_log_level log_level) { + const int64_t t0_us = llama_time_us(); + bool ok = true; + try { + llama_params_fit_impl(path_model, mparams, cparams, tensor_split, tensor_buft_overrides, margin_s, n_ctx_min, log_level); + LLAMA_LOG_INFO("%s: successfully fit params to free device memory\n", __func__); + } catch (const std::runtime_error & e) { + LLAMA_LOG_WARN("%s: failed to fit params to free device memory: %s\n", __func__, e.what()); + ok = false; + } + const int64_t t1_us = llama_time_us(); + LLAMA_LOG_INFO("%s: fitting params to free memory took %.2f seconds\n", __func__, (t1_us - t0_us) * 1e-6); + 
+    return ok;
+}
+
 struct llama_sampler_chain_params llama_sampler_chain_default_params() {
     struct llama_sampler_chain_params result = {
         /*.no_perf =*/ true,
@@ -49,6 +692,10 @@ size_t llama_max_devices(void) {
     return 16;
 }
 
+size_t llama_max_tensor_buft_overrides() {
+    return 4096;
+}
+
 bool llama_supports_mmap(void) {
     return llama_mmap::SUPPORTED;
 }
@@ -108,11 +755,12 @@ static int llama_model_load(const std::string & fname, std::vector
     model.t_start_us = tm.t_start_us;
 
     try {
-        llama_model_loader ml(fname, splits, params.use_mmap, params.check_tensors, params.kv_overrides, params.tensor_buft_overrides);
+        llama_model_loader ml(fname, splits, params.use_mmap, params.check_tensors, params.no_alloc, params.kv_overrides, params.tensor_buft_overrides);
 
         ml.print_info();
 
         model.hparams.vocab_only = params.vocab_only;
+        model.hparams.no_alloc   = params.no_alloc;
 
         try {
             model.load_arch(ml);
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 43a0e81949..8df3f41003 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -37,4 +37,5 @@ else()
         add_subdirectory(cvector-generator)
         add_subdirectory(export-lora)
     endif()
+    add_subdirectory(fit-params)
 endif()
diff --git a/tools/fit-params/CMakeLists.txt b/tools/fit-params/CMakeLists.txt
new file mode 100644
index 0000000000..34c3373f83
--- /dev/null
+++ b/tools/fit-params/CMakeLists.txt
@@ -0,0 +1,8 @@
+set(TARGET llama-fit-params)
+add_executable(${TARGET} fit-params.cpp)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+if(LLAMA_TOOLS_INSTALL)
+    install(TARGETS ${TARGET} RUNTIME)
+endif()
diff --git a/tools/fit-params/README.md b/tools/fit-params/README.md
new file mode 100644
index 0000000000..8f0c958a2f
--- /dev/null
+++ b/tools/fit-params/README.md
@@ -0,0 +1,55 @@
+# fit-params
+
+llama.cpp binaries can automatically fit the projected memory use of a model to the free device memory available at runtime;
+this is controlled via the CLI arguments starting with `-fit`/`--fit`.
+Internally the code calls `llama_params_fit` to adjust the `llama_model_params` and `llama_context_params` structs.
+`llama-fit-params` is a simple utility that prints the CLI arguments corresponding to these adjustments to stdout.
+Example usage:
+
+``` bash
+# First, run llama-fit-params and store the results in a file:
+> ./build/bin/llama-fit-params --model /opt/models/qwen_3-30b3a-f16.gguf | tee args.txt
+ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
+ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
+ggml_cuda_init: found 1 CUDA devices:
+  Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes
+build: 6895 (4341dc8bc) with cc (GCC) 15.2.1 20250813 for x86_64-pc-linux-gnu
+llama_params_fit_impl: projected to use 61807 MiB of device memory vs. 24077 MiB of free device memory
+llama_params_fit_impl: cannot fulfill margin of 1024 MiB, need to reduce device memory by 42444 MiB
+llama_params_fit_impl: context size reduced from 40960 to 4096 -> need 3456 MiB less memory in total
+llama_params_fit_impl: with only dense weights in device memory there is a total surplus of 16164 MiB
+llama_params_fit_impl: distributing layers across devices with overflow to next device/system memory:
+llama_params_fit_impl:   - CUDA0 (NVIDIA GeForce RTX 4090): 48 layers (34 overflowing), 19187 MiB used, 1199 MiB free
+llama_params_fit: successfully fit params to free device memory
+llama_params_fit: fitting params to free memory took 1.15 seconds
+Printing fitted CLI arguments to stdout...
+-c 4096 -ngl 48 -ot blk\.14\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.15\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.16\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.17\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.18\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.19\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.20\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.21\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.22\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.23\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.24\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.25\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.26\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.27\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.28\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.29\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.30\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.31\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.32\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.33\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.34\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.35\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.36\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.37\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.38\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.39\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.40\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.41\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.42\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.43\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.44\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.45\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.46\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.47\.ffn_(up|down|gate)_(ch|)exps=CPU + +# Next, use those results for a llama.cpp binary: +> cat args.txt | xargs ./build/bin/llama-server --model /opt/models/qwen_3-30b3a-f16.gguf +ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no +ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no +ggml_cuda_init: found 1 CUDA devices: + Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes +build: 6895 (4341dc8bc) with cc (GCC) 15.2.1 20250813 for x86_64-pc-linux-gnu +system info: n_threads = 16, n_threads_batch = 16, total_threads = 32 + +system_info: n_threads = 16 (n_threads_batch = 16) / 32 | CUDA : ARCHS = 890 | USE_GRAPHS = 1 | PEER_MAX_BATCH_SIZE = 128 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX_VNNI = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | BMI2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | LLAMAFILE = 1 | OPENMP = 1 | REPACK = 1 | + +main: binding port with default address family +main: HTTP server is listening, hostname: 127.0.0.1, port: 8080, http threads: 31 +main: loading model +srv load_model: loading model '/opt/models/qwen_3-30b3a-f16.gguf' +llama_params_fit_impl: projected to use 19187 MiB of device memory vs. 24077 MiB of free device memory +llama_params_fit_impl: will leave 1199 >= 1024 MiB of free device memory, no changes needed +llama_params_fit: successfully fit params to free device memory +llama_params_fit: fitting params to free memory took 0.28 seconds +[...] +main: server is listening on http://127.0.0.1:8080 - starting the main loop +srv update_slots: all slots are idle +^Csrv operator(): operator(): cleaning up before exit... 
+
+llama_memory_breakdown_print: | memory breakdown [MiB] | total free self model context compute unaccounted |
+llama_memory_breakdown_print: |   - CUDA0 (RTX 4090)   | 24077 = 945 + (19187 = 17904 + 384 + 898) + 3945 |
+llama_memory_breakdown_print: |   - Host               | 58271 = 58259 + 0 + 12 |
+```
diff --git a/tools/fit-params/fit-params.cpp b/tools/fit-params/fit-params.cpp
new file mode 100644
index 0000000000..fbf7a2eb37
--- /dev/null
+++ b/tools/fit-params/fit-params.cpp
@@ -0,0 +1,62 @@
+#include "llama.h"
+
+#include "arg.h"
+#include "common.h"
+#include "log.h"
+
+#include <iostream>
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+int main(int argc, char ** argv) {
+    common_params params;
+
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
+        return 1;
+    }
+
+    common_init();
+    llama_backend_init();
+    llama_numa_init(params.numa);
+    auto mparams = common_model_params_to_llama(params);
+    auto cparams = common_context_params_to_llama(params);
+    llama_params_fit(params.model.path.c_str(), &mparams, &cparams,
+                     params.tensor_split, params.tensor_buft_overrides.data(), params.fit_params_target, params.fit_params_min_ctx,
+                     params.verbosity >= 4 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_ERROR);
+
+    LOG_INF("Printing fitted CLI arguments to stdout...\n");
+    std::cout << "-c " << cparams.n_ctx;
+    std::cout << " -ngl " << mparams.n_gpu_layers;
+
+    size_t nd = llama_max_devices();
+    while (nd > 1 && mparams.tensor_split[nd - 1] == 0.0f) {
+        nd--;
+    }
+    if (nd > 1) {
+        for (size_t id = 0; id < nd; id++) {
+            if (id == 0) {
+                std::cout << " -ts ";
+            }
+            if (id > 0) {
+                std::cout << ",";
+            }
+            std::cout << mparams.tensor_split[id];
+        }
+    }
+
+    const size_t ntbo = llama_max_tensor_buft_overrides();
+    for (size_t itbo = 0; itbo < ntbo && mparams.tensor_buft_overrides[itbo].pattern != nullptr; itbo++) {
+        if (itbo == 0) {
+            std::cout << " -ot ";
+        }
+        if (itbo > 0) {
+            std::cout << ",";
+        }
+        std::cout << mparams.tensor_buft_overrides[itbo].pattern << "=" << ggml_backend_buft_name(mparams.tensor_buft_overrides[itbo].buft);
+    }
+    std::cout << "\n";
+
+    return 0;
+}
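
The layer-distribution loop in `llama_params_fit_impl` above brackets the answer between an assignment known to fit and an upper bound known not to fit, then narrows the gap with an interpolated step, `step_size = delta * (target - mem) / (mem_high - mem)`, clamped to `[1, delta - 1]`. The standalone sketch below illustrates that search idea in isolation; it is not llama.cpp code, the names (`max_layers_fitting`, `mem_for`) are made up for the example, and a toy linear memory model stands in for `get_memory_for_layers`:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <functional>

// Find the largest n in [lo, hi] whose projected memory use fits the target,
// assuming mem_for(n) is monotonically increasing, mem_for(lo) <= target and
// mem_for(hi) > target. Instead of plain bisection, the step is interpolated
// from the two bracketing measurements, which converges faster when memory
// use is roughly linear in the number of layers.
static uint32_t max_layers_fitting(
        uint32_t lo, uint32_t hi, int64_t target,
        const std::function<int64_t(uint32_t)> & mem_for) {
    int64_t mem_lo = mem_for(lo);
    int64_t mem_hi = mem_for(hi);
    while (hi - lo > 1) {
        uint32_t step = uint32_t(int64_t(hi - lo) * (target - mem_lo) / (mem_hi - mem_lo));
        step = std::max(step, uint32_t(1)); // always make progress
        step = std::min(step, hi - lo - 1); // stay strictly inside the bracket
        const uint32_t mid     = lo + step;
        const int64_t  mem_mid = mem_for(mid);
        if (mem_mid <= target) {
            lo = mid; mem_lo = mem_mid; // mid fits -> new lower bound
        } else {
            hi = mid; mem_hi = mem_mid; // mid does not fit -> new upper bound
        }
    }
    return lo;
}

int main() {
    // toy model: 350 MiB of fixed overhead plus 410 MiB per layer, 16 GiB budget
    const auto mem_for = [](uint32_t n) { return int64_t(350) + int64_t(410) * n; };
    printf("%u layers fit\n", max_layers_fitting(0, 48, 16 * 1024, mem_for)); // prints: 39 layers fit
}
```

When memory use really is close to linear in the layer count, the interpolated step lands near the answer on the first probe and the expensive memory projection only has to be evaluated a handful of times.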
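For reference, the `-ot` string emitted by `llama-fit-params` (and accepted by the other binaries) is a comma-separated list of `REGEX=BUFFER_TYPE` pairs, e.g. `blk\.14\.ffn_(up|down|gate)_(ch|)exps=CPU` routes the expert tensors of layer 14 to host memory. As a rough sketch of how such a string splits back into pairs — this is illustrative only, not the actual parser in `common/arg.cpp` — one can scan for commas and take the text after the last `=` of each item as the buffer type name:

```cpp
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Split "PATTERN1=BUFT1,PATTERN2=BUFT2,..." into (pattern, buffer type) pairs.
// The buffer type name follows the last '=' of each item, so patterns may
// themselves contain '='. Note: a regex containing ',' would defeat this
// naive splitting; the patterns printed by llama-fit-params contain none.
static std::vector<std::pair<std::string, std::string>> split_overrides(const std::string & s) {
    std::vector<std::pair<std::string, std::string>> out;
    size_t pos = 0;
    while (pos < s.size()) {
        size_t comma = s.find(',', pos);
        if (comma == std::string::npos) {
            comma = s.size();
        }
        const std::string item = s.substr(pos, comma - pos);
        const size_t eq = item.rfind('=');
        if (eq != std::string::npos) {
            out.emplace_back(item.substr(0, eq), item.substr(eq + 1));
        }
        pos = comma + 1;
    }
    return out;
}

int main() {
    const std::string arg = R"(blk\.14\.ffn_(up|down|gate)_(ch|)exps=CPU,blk\.15\.ffn_(up|down|gate)_(ch|)exps=CPU)";
    for (const auto & [pattern, buft] : split_overrides(arg)) {
        printf("%-40s -> %s\n", pattern.c_str(), buft.c_str());
    }
}
```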