From 8ee538ce73de1afbf6924a38c2d016e496c26497 Mon Sep 17 00:00:00 2001
From: thecaptain789 <257642323+thecaptain789@users.noreply.github.com>
Date: Wed, 11 Feb 2026 06:05:31 +0000
Subject: [PATCH] llama : correct typos 'occured' and 'occurences' (#19414)

Co-authored-by: thecaptain789
---
 common/ngram-map.cpp | 2 +-
 common/ngram-map.h   | 4 ++--
 include/llama.h      | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/common/ngram-map.cpp b/common/ngram-map.cpp
index 2b876a6e99..ebf771a24a 100644
--- a/common/ngram-map.cpp
+++ b/common/ngram-map.cpp
@@ -461,7 +461,7 @@ void common_ngram_map_draft(common_ngram_map & map,
             slot_max = v;
         }
     }
-    // What is sum of the other occurences?
+    // What is sum of the other occurrences?
     uint32_t sum_occur = 0;
     for (int v = 0; v < COMMON_NGRAM_MAX_VALUES; ++v) {
         if (v == slot_max) {
diff --git a/common/ngram-map.h b/common/ngram-map.h
index 41b9530449..d84e719151 100644
--- a/common/ngram-map.h
+++ b/common/ngram-map.h
@@ -44,7 +44,7 @@ llama_tokens common_ngram_simple_draft(
 // statistics of a m-gram after a known n-gram
 struct common_ngram_map_value {
     size_t value_idx = 0; // index of value m-gram in token-history (0 if unused)
-    uint16_t value_num = 0; // number of occurences of this value m-gram after the key n-gram (0 in an unused values-slot)
+    uint16_t value_num = 0; // number of occurrences of this value m-gram after the key n-gram (0 in an unused values-slot)
     int16_t n_accepted = -1; // number of accepted tokens at last draft (-1 if unused)
 };
 
@@ -53,7 +53,7 @@ struct common_ngram_map_key {
     size_t key_idx; // index of key n-gram in token-history
     size_t stat_idx; // index of last token of stastistics computation (key_num, values)
 
-    uint16_t key_num; // number of occurences of this key n-gram in token-history
+    uint16_t key_num; // number of occurrences of this key n-gram in token-history
     common_ngram_map_value values[COMMON_NGRAM_MAX_VALUES]; // some known values after the key
 };
 
diff --git a/include/llama.h b/include/llama.h
index bf4e28a8be..46c3672e98 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -482,7 +482,7 @@ extern "C" {
     enum llama_params_fit_status {
         LLAMA_PARAMS_FIT_STATUS_SUCCESS = 0, // found allocations that are projected to fit
         LLAMA_PARAMS_FIT_STATUS_FAILURE = 1, // could not find allocations that are projected to fit
-        LLAMA_PARAMS_FIT_STATUS_ERROR = 2, // a hard error occured, e.g. because no model could be found at the specified path
+        LLAMA_PARAMS_FIT_STATUS_ERROR = 2, // a hard error occurred, e.g. because no model could be found at the specified path
     };
 
     // fits mparams and cparams to free device memory (assumes system memory is unlimited)