#include "speculative.h"
|
||
|
||
#include "common.h"
|
||
#include "ggml.h"
|
||
#include "llama.h"
|
||
#include "log.h"
|
||
#include "ngram-cache.h"
|
||
#include "ngram-map.h"
|
||
#include "ngram-mod.h"
|
||
#include "sampling.h"
|
||
|
||
#include <algorithm>
|
||
#include <cstring>
|
||
#include <iomanip>
|
||
#include <map>
|
||
#include <cinttypes>
|
||
|
||
#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 128
|
||
#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5
|
||
|
||
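// the draft vocab does not have to be identical to the target vocab: up to
// SPEC_VOCAB_MAX_SIZE_DIFFERENCE extra ids are tolerated (e.g. padding tokens),
// and the token-text comparison in common_speculative_are_compatible() starts
// at SPEC_VOCAB_CHECK_START_TOKEN_ID, skipping the first ids whose text is
// typically model-specific (BOS/EOS and other special tokens)
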
const std::vector<enum common_speculative_type> common_speculative_types = {
    COMMON_SPECULATIVE_TYPE_NONE,
    COMMON_SPECULATIVE_TYPE_DRAFT,
    COMMON_SPECULATIVE_TYPE_EAGLE3,
    COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE,
    COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K,
    COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V,
    COMMON_SPECULATIVE_TYPE_NGRAM_MOD,
    COMMON_SPECULATIVE_TYPE_NGRAM_CACHE
};

const std::map<std::string, enum common_speculative_type> common_speculative_type_from_name_map = {
    {"none",          COMMON_SPECULATIVE_TYPE_NONE},
    {"draft",         COMMON_SPECULATIVE_TYPE_DRAFT},
    {"eagle3",        COMMON_SPECULATIVE_TYPE_EAGLE3},
    {"ngram_simple",  COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE},
    {"ngram_map_k",   COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K},
    {"ngram_map_k4v", COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V},
    {"ngram_mod",     COMMON_SPECULATIVE_TYPE_NGRAM_MOD},
    {"ngram_cache",   COMMON_SPECULATIVE_TYPE_NGRAM_CACHE}
};

struct common_speculative_config {
    common_speculative_type type;
    common_params_speculative params;

    common_speculative_config(common_speculative_type t,
        const common_params_speculative & p = common_params_speculative{}) : type(t), params(p) {}
};

static bool common_speculative_are_compatible(
        const llama_model * model_tgt,
        const llama_model * model_dft) {
    const llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt);
    const llama_vocab * vocab_dft = llama_model_get_vocab(model_dft);

    const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(vocab_tgt);
    LOG_DBG("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt);

    const enum llama_vocab_type vocab_type_dft = llama_vocab_type(vocab_dft);
    LOG_DBG("%s: vocab_type dft: %d\n", __func__, vocab_type_dft);

    if (vocab_type_tgt != vocab_type_dft) {
        LOG_DBG("%s: draft model vocab type must match target model to use speculation but ", __func__);
        LOG_DBG("vocab_type_dft = %d while vocab_type_tgt = %d\n", vocab_type_dft, vocab_type_tgt);
        return false;
    }

    if (
        llama_vocab_get_add_bos(vocab_tgt) != llama_vocab_get_add_bos(vocab_dft) ||
        llama_vocab_get_add_eos(vocab_tgt) != llama_vocab_get_add_eos(vocab_dft) ||
        llama_vocab_bos(vocab_tgt) != llama_vocab_bos(vocab_dft) ||
        llama_vocab_eos(vocab_tgt) != llama_vocab_eos(vocab_dft)
    ) {
        LOG_DBG("%s: draft model special tokens must match target model to use speculation\n", __func__);
        return false;
    }

    {
        const int n_vocab_tgt = llama_vocab_n_tokens(vocab_tgt);
        const int n_vocab_dft = llama_vocab_n_tokens(vocab_dft);

        const int vocab_diff = n_vocab_tgt > n_vocab_dft
            ? n_vocab_tgt - n_vocab_dft
            : n_vocab_dft - n_vocab_tgt;

        if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
            LOG_DBG("%s: draft model vocab must closely match target model to use speculation but ", __func__);
            LOG_DBG("target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
                    n_vocab_tgt, n_vocab_dft, vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
            return false;
        }

        for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
            const char * token_text_tgt = llama_vocab_get_text(vocab_tgt, i);
            const char * token_text_dft = llama_vocab_get_text(vocab_dft, i);

            if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
                LOG_DBG("%s: draft model vocab must match target model to use speculation but ", __func__);
                LOG_DBG("token %d content differs - target '%s', draft '%s'\n", i,
                        common_token_to_piece(vocab_tgt, i).c_str(),
                        common_token_to_piece(vocab_dft, i).c_str());
                return false;
            }
        }
    }

    return true;
}

// state of an implementation of speculative decoding
//
// each implementation has a unique type and a state that is implementation-specific
// in a subclass of common_speculative_state
struct common_speculative_state {
    const enum common_speculative_type type;

    size_t n_call_begin  = 0; // number of times this implementation was called for refresh.
    size_t n_call_draft  = 0; // number of times this implementation was called for generation.
    size_t n_call_accept = 0; // number of times this implementation was called for accumulation.

    size_t n_gen_drafts = 0; // number of times a draft or part was generated by this implementation.
    size_t n_acc_drafts = 0; // number of times a draft or part was accepted by the target model.
    size_t n_gen_tokens = 0; // number of tokens generated by this implementation.
    size_t n_acc_tokens = 0; // number of tokens accepted by the target model.

    // TODO: track performance of most recent calls
    const bool gen_perf = true; // whether to generate performance stats.

    int64_t t_begin_us  = 0; // total time spent in refresh of this implementation in microseconds.
    int64_t t_draft_us  = 0; // total time spent in generating drafts in this implementation in microseconds.
    int64_t t_accept_us = 0; // total time spent in accumulation of this implementation in microseconds.

    common_speculative_state(enum common_speculative_type type) : type(type) {}

    virtual ~common_speculative_state() = default;

    virtual void begin(const llama_tokens & prompt) = 0;

    virtual void draft(
            const common_params_speculative & params,
            const llama_tokens & prompt_tgt,
            llama_token id_last,
            llama_tokens & result) = 0;

    virtual void accept(uint16_t n_accepted) = 0;
};

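// typical call sequence for an implementation, as driven by the dispatcher
// further below (a sketch, not part of the API contract):
//
//   impl->begin(prompt);                          // once, when a request (re)starts
//   impl->draft(params, prompt, id_last, draft);  // per step; `draft` may stay empty
//   impl->accept(n_accepted);                     // only after a non-empty draft
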
struct common_speculative_checkpoint {
    llama_pos pos_min = 0;
    llama_pos pos_max = 0;

    int64_t n_tokens = 0;

    std::vector<uint8_t> data;

    size_t size() const {
        return data.size();
    }

    // size reported by llama_state_seq_get_data_ext when the checkpoint was taken
    size_t ckpt_size = 0;
};

struct common_speculative_state_draft : public common_speculative_state {
    llama_context * ctx_tgt; // only used for retokenizing from ctx_dft
    llama_context * ctx_dft;

    struct common_speculative_checkpoint ckpt;
    bool use_checkpoint;

    common_sampler * smpl;

    llama_batch batch;
    llama_tokens prompt_dft;

    bool vocab_cmpt = true; // whether the vocabs are compatible (if not, retokenization is needed)
    std::unordered_map<std::string, std::string> vocab_map;

    common_speculative_state_draft(
            enum common_speculative_type type,
            llama_context * ctx_tgt,
            llama_context * ctx_dft,
            const std::vector<std::pair<std::string, std::string>> & replacements,
            bool use_checkpoint)
        : common_speculative_state(type)
        , ctx_tgt(ctx_tgt)
        , ctx_dft(ctx_dft)
        , use_checkpoint(use_checkpoint)
    {
        batch = llama_batch_init(llama_n_batch(ctx_dft), 0, 1);
        smpl = nullptr;

        // TODO: optimize or pass from outside?
        // {
        //     common_params_sampling params;
        //     params.no_perf = false;
        //
        //     params.top_k = 40;
        //     params.top_p = 0.9;
        //
        //     params.samplers = {
        //         COMMON_SAMPLER_TYPE_TOP_K,
        //         COMMON_SAMPLER_TYPE_TOP_P,
        //         COMMON_SAMPLER_TYPE_INFILL,
        //     };
        //
        //     result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
        // }
        {
            common_params_sampling params;
            params.no_perf = false;
            params.top_k = 10;
            params.samplers = {
                COMMON_SAMPLER_TYPE_TOP_K,
            };

            smpl = common_sampler_init(llama_get_model(ctx_dft), params);
        }

        vocab_cmpt = common_speculative_are_compatible(llama_get_model(ctx_tgt), llama_get_model(ctx_dft));
        LOG_DBG("vocab_cmpt = %d\n", vocab_cmpt);

        if (!vocab_cmpt) {
            LOG_WRN("the target and draft vocabs are not compatible - tokens will be translated between the two\n");

            for (const auto & pair : replacements) {
                vocab_map[pair.first] = pair.second;
            }
        }
    }

    ~common_speculative_state_draft() override {
        llama_perf_context_print(ctx_dft);

        llama_free(ctx_dft);

        common_sampler_free(smpl);

        llama_batch_free(batch);
    }

    void begin(const llama_tokens & prompt) override {
        if (use_checkpoint && ckpt.size() > 0) {
            // delete checkpoint
            LOG_DBG("%s: delete checkpoint, prompt.size=%zu, pos_min=%d, pos_max=%d, n_tokens=%" PRId64 ", size=%.3f MiB\n",
                    __func__, prompt.size(), ckpt.pos_min, ckpt.pos_max, ckpt.n_tokens, (float) ckpt.data.size() / 1024 / 1024);
            ckpt.pos_min = 0;
            ckpt.pos_max = 0;
            ckpt.n_tokens = 0;
            ckpt.ckpt_size = 0;
            ckpt.data.clear();
        }
    }

    size_t draft_init_checkpoint(int n_tokens_prompt, int n_tokens_batch) {
        int slot_id = 0;
        const size_t checkpoint_size = llama_state_seq_get_size_ext(ctx_dft, slot_id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY);

        ckpt.pos_min = llama_memory_seq_pos_min(llama_get_memory(ctx_dft), slot_id);
        ckpt.pos_max = llama_memory_seq_pos_max(llama_get_memory(ctx_dft), slot_id);
        ckpt.n_tokens = n_tokens_prompt - n_tokens_batch;
        ckpt.data.resize(checkpoint_size);

        const size_t n = llama_state_seq_get_data_ext(ctx_dft, ckpt.data.data(), checkpoint_size, slot_id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY);
        if (n != checkpoint_size) {
            GGML_ABORT("checkpoint size mismatch: expected %zu, got %zu\n", checkpoint_size, n);
        }

        LOG_DBG("%s: pos_min = %d, pos_max = %d, size = %.3f MiB\n", __func__,
                ckpt.pos_min, ckpt.pos_max, (float) ckpt.data.size() / 1024 / 1024);
        return n;
    }

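    // note: LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY is assumed here to snapshot only
    // the part of the sequence state needed for a later partial restore, which
    // keeps the checkpoint much smaller than a full sequence dump; the exact
    // semantics are defined by the llama_state_seq_*_ext API
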
    size_t draft_restore_checkpoint(size_t ckpt_size_part_expected) {
        int slot_id = 0;
        LOG_DBG("%s: pos_min = %d, pos_max = %d\n", __func__, ckpt.pos_min, ckpt.pos_max);
        const size_t n = llama_state_seq_set_data_ext(ctx_dft,
                ckpt.data.data(), ckpt.size(), slot_id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY);
        if (n != ckpt_size_part_expected) {
            GGML_ABORT("%s: failed to restore context checkpoint (pos_min=%d, pos_max=%d, size=%zu, get_data_ext->%zu, set_data_ext->%zu)",
                    __func__, ckpt.pos_min, ckpt.pos_max, ckpt.size(), ckpt_size_part_expected, n);
        }
        return n;
    }

    void draft(
            const common_params_speculative & params,
            const llama_tokens & prompt_tgt,
            llama_token id_last,
            llama_tokens & result) override {
        auto * spec = this;

        auto & batch      = spec->batch;
        auto & ctx_tgt    = spec->ctx_tgt;
        auto & ctx_dft    = spec->ctx_dft;
        auto & smpl       = spec->smpl;
        auto & prompt_dft = spec->prompt_dft;

        auto * mem_dft = llama_get_memory(ctx_dft);

        int reuse_i = 0; // index of part to be reused in prompt_dft
        int reuse_n = 0; // length of part to be reused in prompt_dft

        const int n_ctx = llama_n_ctx(ctx_dft) - params.n_max;

        llama_tokens prompt_cnv;
        if (!spec->vocab_cmpt) {
            std::string text;

            text = common_detokenize(ctx_tgt, prompt_tgt, true);
            text = replace_to_dft(text);

            LOG_DBG("%s: main->draft detokenized string: '%s'\n", __func__, text.c_str());

            prompt_cnv = common_tokenize(ctx_dft, text, false, true);

            // convert id_last to draft vocab. llama_detokenize is called directly to avoid an allocation
            const auto * model_tgt = llama_get_model(ctx_tgt);
            const auto * vocab_tgt = llama_model_get_vocab(model_tgt);

            int32_t n_chars = llama_detokenize(vocab_tgt, &id_last, 1, nullptr, 0, false, false);
            GGML_ASSERT(n_chars < 0 && "failed to detokenize id_last");

            text.resize(-n_chars);
            llama_detokenize(vocab_tgt, &id_last, 1, text.data(), text.size(), false, false);
            text = replace_to_dft(text);

            LOG_DBG("main->draft detokenized id_last(%d): '%s'\n", id_last, text.c_str());
            id_last = common_tokenize(ctx_dft, text, false, true)[0];
        }

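        // when the vocabs differ, everything above works on strings: the target
        // prompt is detokenized, the user-supplied string replacements are
        // applied (e.g. mapping between chat-template markers), and the text is
        // retokenized with the draft vocab; the same round-trip happens in
        // reverse for the generated draft at the end of this function
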
        const llama_tokens & prompt_cur = spec->vocab_cmpt ? prompt_tgt : prompt_cnv;

        const int i_start = std::max<int>(0, (int) prompt_cur.size() - n_ctx);

        // reuse as much as possible from the old draft context
        // ideally, the draft context should be as big as the target context and we will always reuse the entire prompt
        for (int i = 0; i < (int) prompt_dft.size(); ++i) {
            int cur = 0;
            while (i_start + cur < (int) prompt_cur.size() &&
                   i       + cur < (int) prompt_dft.size() &&
                   prompt_cur[i_start + cur] == prompt_dft[i + cur]) {
                cur++;
            }

            if ((cur >= 256 || n_ctx >= (int) prompt_cur.size()) && cur > reuse_n) {
                reuse_i = i;
                reuse_n = cur;
            }
        }

        LOG_DBG("%s: reuse_i = %d, reuse_n = %d, #prompt_dft = %zu, #prompt_cur = %zu\n",
                __func__, reuse_i, reuse_n, prompt_dft.size(), prompt_cur.size());
        if (use_checkpoint && ckpt.ckpt_size == 0 && reuse_n > 0) {
            LOG_DBG("%s: no checkpoint available - disabling reuse, (reuse_i=%d, reuse_n=%d) -> (0, 0)\n",
                    __func__, reuse_i, reuse_n);
            reuse_i = 0;
            reuse_n = 0;
        }

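        // example: if the old draft prompt is [A B C D E] and the new (shifted)
        // target prompt is [C D E F], the loop above finds the longest common run
        // [C D E] at reuse_i = 2 with reuse_n = 3 (assuming the prompt fits into
        // the draft context, short matches qualify too); the matched part stays
        // in the draft KV cache and only [F] needs to be evaluated again
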
        result.clear();
        result.reserve(params.n_max);

        bool needs_ckpt = use_checkpoint && prompt_dft.size() > 0;
        if (reuse_n == 0 || (use_checkpoint && reuse_i > 0)) {
            llama_memory_clear(mem_dft, false);
            prompt_dft.clear();
        } else {
            // this happens when a previous draft has been discarded (for example, due to being too small), but the
            // target model agreed with it. in this case, we simply pass back the previous results to save compute
            if (reuse_i + reuse_n < (int64_t) prompt_dft.size() && prompt_dft[reuse_i + reuse_n] == id_last) {
                for (int i = reuse_i + reuse_n + 1; i < (int) prompt_dft.size(); ++i) {
                    result.push_back(prompt_dft[i]);

                    if (params.n_max <= (int) result.size()) {
                        break;
                    }
                }

                return;
            }

            bool do_restore = false;
            if (prompt_dft.size() > prompt_cur.size() && reuse_i + reuse_n < (int64_t) prompt_dft.size()) {
                // this can happen after a partial acceptance (speculative decoding with checkpoints)
                LOG_DBG("%s: #prompt_dft=%zu, #prompt_cur=%zu, shorten draft\n",
                        __func__, prompt_dft.size(), prompt_cur.size());
                prompt_dft.resize(prompt_cur.size());
                do_restore = true;
            }

            if (reuse_i > 0) {
                bool is_removed = llama_memory_seq_rm (mem_dft, 0, 0, reuse_i);
                if (!is_removed) {
                    LOG_ERR("%s: llama_memory_seq_rm failed, reuse_i=%d\n", __func__, reuse_i);
                }
                llama_memory_seq_add(mem_dft, 0, reuse_i, -1, -reuse_i);

                prompt_dft.erase(prompt_dft.begin(), prompt_dft.begin() + reuse_i);
            }

            if (reuse_n < (int) prompt_dft.size() || do_restore) {
                if (use_checkpoint) {
                    if (ckpt.n_tokens > (int64_t) prompt_dft.size()) {
                        LOG_INF("%s: checkpoint is too large, prompt_tgt.size=%zu, ckpt.n_tokens=%" PRId64 ", reuse_n=%d, prompt_dft.size=%zu\n",
                                __func__, prompt_tgt.size(), ckpt.n_tokens, reuse_n, prompt_dft.size());
                    }
                    draft_restore_checkpoint(ckpt.ckpt_size);
                    reuse_n = ckpt.n_tokens;
                    prompt_dft.resize(reuse_n);
                    needs_ckpt = false;
                } else {
                    bool is_removed = llama_memory_seq_rm (mem_dft, 0, reuse_n, -1);
                    if (!is_removed) {
                        LOG_ERR("%s: llama_memory_seq_rm failed, reuse_n=%d, prompt_dft.size=%zu\n",
                                __func__, reuse_n, prompt_dft.size());
                    }
                    prompt_dft.erase(prompt_dft.begin() + reuse_n, prompt_dft.end());
                }
            }
        }

        if (needs_ckpt) { // needs_ckpt already implies use_checkpoint
            ckpt.ckpt_size = draft_init_checkpoint(prompt_dft.size(), batch.n_tokens);
        }

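        // two recovery paths are used above when the cached draft state extends
        // past what can be reused: with checkpoints enabled, the saved state is
        // restored wholesale and prompt_dft is cut back to the checkpointed
        // length; without checkpoints, llama_memory_seq_rm() truncates the draft
        // KV cache to the reusable prefix instead
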
        // prepare a batch to evaluate any new tokens in the prompt
        common_batch_clear(batch);

        for (size_t i = i_start + reuse_n; i < prompt_cur.size(); ++i) {
            //LOG_DBG("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_cur[i]);
            common_batch_add(batch, prompt_cur[i], i - i_start, { 0 }, false);

            prompt_dft.push_back(prompt_cur[i]);
        }

        // we should rarely end up here during normal decoding
        if (batch.n_tokens > 0) {
            //LOG_DBG("%s: draft prompt batch: %s\n", __func__, string_from(ctx, batch).c_str());

            int ret = llama_decode(ctx_dft, batch);
            if (ret != 0 && ret != 1) {
                LOG_WRN("%s: llama_decode returned %d, prompt_cur.size=%zu\n",
                        __func__, ret, prompt_cur.size());
            }
        }

        const llama_pos n_past = prompt_dft.size();

        LOG_DBG("%s: n_past = %d\n", __func__, n_past);

        common_batch_clear(batch);
        common_batch_add (batch, id_last, n_past, { 0 }, true);

        prompt_dft.push_back(id_last);

        LOG_DBG("%s: draft prompt: %s\n", __func__, string_from(ctx_dft, prompt_dft).c_str());

        int ret = llama_decode(ctx_dft, batch);
        if (ret != 0 && ret != 1) {
            LOG_WRN("%s: llama_decode returned %d, prompt_cur.size=%zu, prompt_dft.size=%zu\n",
                    __func__, ret, prompt_cur.size(), prompt_dft.size());
        }

        common_sampler_reset(smpl);

        // sample up to params.n_max tokens from the draft model
        for (int i = 0; i < params.n_max; ++i) {
            common_batch_clear(batch);

            common_sampler_sample(smpl, ctx_dft, 0, true);

            const auto * cur_p = common_sampler_get_candidates(smpl, true);

            for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) {
                LOG_DBG(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n",
                        k, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
            }

            // add drafted token for each sequence
            const llama_token id = cur_p->data[0].id;

            common_sampler_accept(smpl, id, true);

            result.push_back(id);

            if (params.n_max <= (int) result.size()) {
                break;
            }

            // only collect very high-confidence draft tokens
            if (cur_p->data[0].p < params.p_min) {
                break;
            }

            common_batch_add(batch, id, n_past + i + 1, { 0 }, true);

            // evaluate the drafted tokens on the draft model
            ret = llama_decode(ctx_dft, batch);
            if (ret != 0) {
                LOG_WRN("%s: llama_decode[%d] returned %d, prompt_cur.size=%zu, prompt_dft.size=%zu\n",
                        __func__, i, ret, prompt_cur.size(), prompt_dft.size());
            }

            prompt_dft.push_back(id);
        }

        if (!spec->vocab_cmpt) {
            std::string detokenized = common_detokenize(ctx_dft, result, true);
            detokenized = replace_to_tgt(detokenized);
            LOG_DBG("draft->main detokenized string: '%s'\n", detokenized.c_str());
            result = common_tokenize(ctx_tgt, detokenized, false, true);
            if (result.size() > (size_t) params.n_max) {
                result.resize(params.n_max);
            }
        }
    }

    void accept(uint16_t n_accepted) override {
        // noop
        GGML_UNUSED(n_accepted);
    }

    // apply the user-supplied string replacements in target->draft direction
    std::string replace_to_dft(const std::string & input) const {
        std::string result = input;

        for (const auto & pair : this->vocab_map) {
            size_t pos = result.find(pair.first);
            while (pos != std::string::npos) {
                result.replace(pos, pair.first.length(), pair.second);
                // continue searching after the inserted text so that a
                // replacement cannot match itself and loop forever
                pos = result.find(pair.first, pos + pair.second.length());
            }
        }

        return result;
    }

    // apply the same replacements in draft->target direction
    std::string replace_to_tgt(const std::string & input) const {
        std::string result = input;

        for (const auto & pair : this->vocab_map) {
            size_t pos = result.find(pair.second);
            while (pos != std::string::npos) {
                result.replace(pos, pair.second.length(), pair.first);
                pos = result.find(pair.second, pos + pair.first.length());
            }
        }

        return result;
    }
};

struct common_speculative_state_eagle3 : public common_speculative_state {
    common_speculative_state_eagle3(enum common_speculative_type type) : common_speculative_state(type) {}

    void begin(const llama_tokens & prompt) override {
        GGML_UNUSED(prompt);
    }

    void draft(
            const common_params_speculative & params,
            const llama_tokens & prompt_tgt,
            llama_token id_last,
            llama_tokens & draft_tokens) override {
        // TODO: implement
        GGML_UNUSED(params);
        GGML_UNUSED(prompt_tgt);
        GGML_UNUSED(id_last);
        GGML_UNUSED(draft_tokens);
    }

    void accept(uint16_t n_accepted) override {
        // noop
        GGML_UNUSED(n_accepted);
    }
};

// state of self-speculation (simple implementation, not ngram-map)
struct common_speculative_state_ngram_simple : public common_speculative_state {
    common_ngram_simple_config config;

    common_speculative_state_ngram_simple(
            enum common_speculative_type type,
            common_ngram_simple_config config)
        : common_speculative_state(type), config(config) {}

    void begin(const llama_tokens & prompt) override {
        GGML_UNUSED(prompt);
    }

    void draft(
            const common_params_speculative & params,
            const llama_tokens & prompt_tgt,
            llama_token id_last,
            llama_tokens & result) override {
        result = common_ngram_simple_draft(config, prompt_tgt, id_last);
        GGML_UNUSED(params);
    }

    void accept(uint16_t n_accepted) override {
        // noop
        GGML_UNUSED(n_accepted);
    }
};

struct common_speculative_state_ngram_map_k : public common_speculative_state {
    // draft ngram map for speculative decoding without a draft model
    common_ngram_map map;

    common_speculative_state_ngram_map_k(
            enum common_speculative_type type,
            common_ngram_map map)
        : common_speculative_state(type), map(std::move(map)) {}

    void begin(const llama_tokens & prompt) override {
        common_ngram_map_begin(map, prompt);
    }

    void draft(
            const common_params_speculative & params,
            const llama_tokens & prompt_tgt,
            llama_token id_last,
            llama_tokens & result) override {
        common_ngram_map_draft(map, prompt_tgt, id_last, result);
        GGML_UNUSED(params);
    }

    void accept(uint16_t n_accepted) override {
        common_ngram_map_accept(map, n_accepted);
    }
};

struct common_speculative_state_ngram_mod : public common_speculative_state {
    common_ngram_mod & mod;

    // the last position in the prompt that was added to the ngram container
    size_t i_last = 0;

    // length of the last drafted n-gram (number of tokens returned by draft)
    size_t n_draft_last = 0;

    // consecutive accept rounds with low acceptance fraction (< 0.5)
    int n_low = 0;

    // enable trace logging if LLAMA_TRACE is set
    const bool verbose;

    common_speculative_state_ngram_mod(enum common_speculative_type type, common_ngram_mod & mod)
        : common_speculative_state(type), mod(mod), verbose(std::getenv("LLAMA_TRACE") != nullptr) {
        static_assert(sizeof(llama_token) == sizeof(common_ngram_mod::entry_t));
    }

    void begin(const llama_tokens & prompt) override {
        i_last = 0;

        n_draft_last = 0;

        const size_t n = mod.get_n();

        if (prompt.size() < n) {
            return;
        }

        for (size_t i = 0; i < prompt.size() - n; ++i) {
            mod.add(prompt.data() + i);
        }

        i_last = prompt.size() - n;

        const double f = (double) mod.get_used() / (double) mod.size();
        LOG_INF("%s: ngram_mod occupancy = %zu/%zu (%.2f)\n", __func__, mod.get_used(), mod.size(), f);

        constexpr double f_thold = 0.25;
        if (f > f_thold) {
            LOG_WRN("%s: ngram_mod occupancy %.2f exceeds threshold (%.2f) - resetting\n", __func__, f, f_thold);

            mod.reset();
        }
    }

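    // design note (an interpretation, based on the occupancy/reset logic above):
    // the ngram container appears to be a fixed-size, lossy table, so once it
    // fills up, unrelated n-grams start colliding and lookups in draft() degrade;
    // resetting above the occupancy threshold trades recall for precision
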
    void draft(
            const common_params_speculative & params,
            const llama_tokens & prompt_tgt,
            llama_token id_last,
            llama_tokens & result) override {
        n_draft_last = 0;

        const size_t n = mod.get_n();

        const size_t cur_len = prompt_tgt.size();
        if (cur_len < n) {
            return;
        }

        // add new ngrams in chunks
        if (i_last + 32 < cur_len) {
            for (size_t i = i_last; i < cur_len - n; ++i) {
                mod.add(prompt_tgt.data() + i);
            }

            i_last = cur_len - n;
        }

        // the first n tokens of result form the lookup key:
        // the last n - 1 prompt tokens followed by id_last
        result.resize(n + params.n_max);
        for (size_t i = 0; i < n - 1; ++i) {
            result[i] = prompt_tgt[cur_len - n + 1 + i];
        }
        result[n - 1] = id_last;

        for (int i = 0; i < params.n_max; ++i) {
            const llama_token token = mod.get(result.data() + i);
            if (token == common_ngram_mod::EMPTY) {
                if (i < params.n_min) {
                    result.clear();
                    return;
                }

                result.resize(n + i);
                break;
            }
            result[n + i] = token;
        }

        // only return the tokens that were actually drafted (drop the key prefix)
        for (size_t i = 0; n + i < result.size(); ++i) {
            result[i] = result[n + i];
        }
        result.resize(result.size() - n);

        // store the length of the drafted continuation for later acceptance analysis
        n_draft_last = result.size();
    }

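    // example with n = 4: after a prompt ending in "... the quick brown" and
    // id_last = "fox", the key is [the, quick, brown, fox]; a hit appends the
    // stored continuation token, the key window slides by one, and the lookup
    // repeats until params.n_max tokens are drafted or the container has no
    // entry for the current window
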
    void accept(uint16_t n_accepted) override {
        if (verbose) {
            LOG_INF("%s: accepted %d tokens from %zu drafted tokens\n", __func__, n_accepted, n_draft_last);
        }

        // compute acceptance fraction if we have a recorded draft length
        if (n_draft_last > 0) {
            const double f_acc = (double) n_accepted / (double) n_draft_last;
            if (f_acc < 0.5) {
                n_low++;
                if (n_low >= 3) {
                    LOG_WRN("%s: low acceptance streak (%d) - resetting ngram_mod\n", __func__, n_low);

                    mod.reset();
                    n_low = 0;
                }
            } else {
                n_low = 0;
            }
        }
    }
};

struct common_speculative_state_ngram_cache : public common_speculative_state {
    uint16_t n_draft;
    bool save_dynamic;
    bool save_static;

    common_ngram_cache ngram_cache_context; // ngrams observed in the current context
    common_ngram_cache ngram_cache_dynamic; // ngrams accumulated across runs (optional)
    common_ngram_cache ngram_cache_static;  // ngrams from a fixed corpus (optional)

    size_t cache_size = 0; // number of tokens in the n-gram cache

    common_speculative_state_ngram_cache(
            const enum common_speculative_type type,
            const std::string & path_static,
            const std::string & path_dynamic,
            uint16_t n_draft,
            bool save_dynamic,
            bool save_static)
        : common_speculative_state(type)
        , n_draft(n_draft)
        , save_dynamic(save_dynamic)
        , save_static(save_static)
    {
        if (!path_static.empty()) {
            try {
                ngram_cache_static = common_ngram_cache_load(path_static);
            } catch (...) {
                LOG_ERR("failed to open static lookup cache: %s\n", path_static.c_str());
                GGML_ABORT("Couldn't read static lookup cache");
            }
        }

        if (!path_dynamic.empty()) {
            try {
                ngram_cache_dynamic = common_ngram_cache_load(path_dynamic);
            } catch (...) {
                LOG_ERR("failed to open dynamic lookup cache: %s\n", path_dynamic.c_str());
                GGML_ABORT("Couldn't read dynamic lookup cache");
            }
        }
    }

    void begin(const llama_tokens & prompt) override {
        GGML_UNUSED(prompt);
    }

    void draft(
            const common_params_speculative & params,
            const llama_tokens & prompt_tgt,
            llama_token id_last,
            llama_tokens & result) override {
        GGML_UNUSED(params);

        if (cache_size < prompt_tgt.size() + 1) {
            llama_tokens tokens_new;
            tokens_new.reserve(prompt_tgt.size() + 1 - cache_size);
            for (size_t j = cache_size; j < prompt_tgt.size(); ++j) {
                tokens_new.push_back(prompt_tgt[j]);
            }
            tokens_new.push_back(id_last); // add the last token

            // update the context ngram cache with the new part of prompt_tgt:
            common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX,
                    tokens_new, tokens_new.size(), false);
            cache_size = prompt_tgt.size() + 1;
        }

        llama_tokens inp;
        inp.reserve(prompt_tgt.size() + 1);
        for (size_t j = 0; j < prompt_tgt.size(); ++j) {
            inp.push_back(prompt_tgt[j]);
        }
        inp.push_back(id_last);

        result.push_back(id_last);

        common_ngram_cache_draft(inp, result, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX,
                ngram_cache_context,
                ngram_cache_dynamic,
                ngram_cache_static);

        if (result.size() > 0) {
            // delete the first token in result (which is id_last)
            result.erase(result.begin());
        }
    }

    void accept(uint16_t n_accepted) override {
        // TODO: noop
        GGML_UNUSED(n_accepted);
    }
};

struct common_speculative {
    std::vector<std::unique_ptr<common_speculative_state>> impls; // list of implementations to use and their states
    common_speculative_state * curr_impl = nullptr;               // current implementation in use (for stats)
};

static common_ngram_map get_common_ngram_map(const common_speculative_config & config) {
    uint16_t size_key   = config.params.ngram_size_n;
    uint16_t size_value = config.params.ngram_size_m;
    bool     key_only   = (config.type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K);
    uint16_t min_hits   = config.params.ngram_min_hits;

    return common_ngram_map(size_key, size_value, key_only, min_hits);
}

static common_speculative_state_ngram_cache create_state_ngram_cache(
        const std::string & path_static, const std::string & path_dynamic,
        const common_speculative_config & config) {
    uint16_t n_draft = 8; // TODO: get from config?

    // TODO: bool param in common/common.h to set save_static/save_dynamic?
    bool save_static  = false;
    bool save_dynamic = false;

    common_speculative_state_ngram_cache state(config.type, path_static, path_dynamic, n_draft, save_dynamic, save_static);

    return state;
}

std::string common_speculative_type_name_str() {
    std::string result;
    for (size_t i = 0; i < common_speculative_types.size(); i++) {
        if (i > 0) {
            result += ", ";
        }
        result += common_speculative_type_to_str(common_speculative_types[i]);
    }
    return result;
}

std::string common_speculative_type_to_str(enum common_speculative_type type) {
    switch (type) {
        case COMMON_SPECULATIVE_TYPE_NONE:          return "none";
        case COMMON_SPECULATIVE_TYPE_DRAFT:         return "draft";
        case COMMON_SPECULATIVE_TYPE_EAGLE3:        return "eagle3";
        case COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE:  return "ngram_simple";
        case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K:   return "ngram_map_k";
        case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V: return "ngram_map_k4v";
        case COMMON_SPECULATIVE_TYPE_NGRAM_MOD:     return "ngram_mod";
        case COMMON_SPECULATIVE_TYPE_NGRAM_CACHE:   return "ngram_cache";
        default:                                    return "unknown";
    }
}

enum common_speculative_type common_speculative_type_from_name(const std::string & name) {
    const auto it = common_speculative_type_from_name_map.find(name);
    if (it == common_speculative_type_from_name_map.end()) {
        return COMMON_SPECULATIVE_TYPE_COUNT;
    }
    return it->second;
}

common_speculative_compat_type common_speculative_is_compat(llama_context * ctx_tgt) {
    auto * mem = llama_get_memory(ctx_tgt);
    if (mem == nullptr) {
        return COMMON_SPECULATIVE_COMPAT_TYPE_NO;
    }

    common_speculative_compat_type res = COMMON_SPECULATIVE_COMPAT_TYPE_FULL;

    llama_memory_clear(mem, true);

    // eval 2 tokens to check if the context is compatible
    std::vector<llama_token> tmp;
    tmp.push_back(0);
    tmp.push_back(0);

    int ret = llama_decode(ctx_tgt, llama_batch_get_one(tmp.data(), tmp.size()));
    if (ret != 0) {
        LOG_ERR("%s: llama_decode() failed: %d\n", __func__, ret);
        res = COMMON_SPECULATIVE_COMPAT_TYPE_NO;
        goto done;
    }

    // try to remove the last token
    if (!llama_memory_seq_rm(mem, 0, 1, -1)) {
        LOG_WRN("%s: the target context does not support partial sequence removal\n", __func__);
        res = COMMON_SPECULATIVE_COMPAT_TYPE_CKPT;
        goto done;
    }

done:
    llama_memory_clear(mem, true);
    llama_synchronize(ctx_tgt);

    return res;
}

// initialization of the speculative decoding system
//
common_speculative * common_speculative_init(
        common_params_speculative & params,
        llama_context * ctx_tgt) {
    llama_context * ctx_dft = nullptr;
    if (params.model_dft) {
        ctx_dft = llama_init_from_model(params.model_dft, params.cparams_dft);
        if (ctx_dft == nullptr) {
            LOG_ERR("%s", "failed to create draft context\n");
            return nullptr;
        }
    }

    // compute the implementations to use based on the config and their order of preference
    std::vector<common_speculative_config> configs = {}; // list of speculative configs to try
    {
        bool has_draft        = !params.mparams_dft.path.empty();
        bool has_draft_eagle3 = false; // TODO PR-18039: if params.speculative.eagle3

        bool has_ngram_cache   = (params.type == COMMON_SPECULATIVE_TYPE_NGRAM_CACHE);
        bool has_ngram_simple  = (params.type == COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE);
        bool has_ngram_map_k   = (params.type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K);
        bool has_ngram_map_k4v = (params.type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V);
        bool has_ngram_mod     = (params.type == COMMON_SPECULATIVE_TYPE_NGRAM_MOD);

        // in a more complex implementation we could use the same implementation but with different parameters.
        // this was initially used in PR-18471 but removed to simplify the code.
        if (has_ngram_simple) {
            // this implementation can guess a lot of tokens without any draft model
            configs.push_back(common_speculative_config(COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE, params));
        }
        if (has_ngram_map_k) {
            configs.push_back(common_speculative_config(COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K, params));
        }
        if (has_ngram_map_k4v) {
            // this implementation can guess tokens with a high acceptance rate but is more expensive
            configs.push_back(common_speculative_config(COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V, params));
        }
        if (has_ngram_mod) {
            // shared instance for all speculative decoding contexts
            if (!params.ngram_mod) {
                params.ngram_mod = std::make_shared<common_ngram_mod>(params.ngram_size_n, 4*1024*1024);

                LOG_INF("%s: initialized ngram_mod with n=%d, size=%zu (%.3f MiB)\n", __func__,
                        params.ngram_size_n, params.ngram_mod->size(),
                        (float)(params.ngram_mod->size_bytes())/1024/1024);

                if (params.ngram_size_n < 16) {
                    LOG_WRN("%s: ngram_mod n=%d is too small - poor quality is possible, see: https://github.com/ggml-org/llama.cpp/pull/19164\n", __func__, params.ngram_size_n);
                }
            }

            configs.push_back(common_speculative_config(COMMON_SPECULATIVE_TYPE_NGRAM_MOD, params));
        }
        if (has_ngram_cache) {
            configs.push_back(common_speculative_config(COMMON_SPECULATIVE_TYPE_NGRAM_CACHE, params));
        }
        if (has_draft) {
            configs.push_back(common_speculative_config(COMMON_SPECULATIVE_TYPE_DRAFT, params));
        }
        if (has_draft_eagle3) {
            configs.push_back(common_speculative_config(COMMON_SPECULATIVE_TYPE_EAGLE3, params));
        }
    }

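    // the order of `configs` is the order of preference: common_speculative_draft()
    // below queries the implementations in this order and uses the first non-empty
    // draft, so the cheap ngram-based guessers run before the draft model
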
    std::vector<std::unique_ptr<common_speculative_state>> impls = {};

    for (const common_speculative_config & config : configs) {
        LOG_DBG("%s: adding implementation %s\n", __func__, common_speculative_type_to_str(config.type).c_str());
        switch (config.type) {
            case COMMON_SPECULATIVE_TYPE_NONE:
                break;
            case COMMON_SPECULATIVE_TYPE_DRAFT: {
                impls.push_back(std::make_unique<common_speculative_state_draft>(config.type,
                    /* .ctx_tgt        = */ ctx_tgt,
                    /* .ctx_dft        = */ ctx_dft,
                    /* .replacements   = */ params.replacements,
                    /* .use_checkpoint = */ params.use_checkpoints
                ));
                break;
            }
            case COMMON_SPECULATIVE_TYPE_EAGLE3: {
                impls.push_back(std::make_unique<common_speculative_state_eagle3>(config.type));
                break;
            }
            case COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE: {
                common_ngram_map ngram_map = get_common_ngram_map(config);

                uint16_t ngram_size_key   = ngram_map.size_key;
                uint16_t mgram_size_value = ngram_map.size_value;

                auto config_simple = common_ngram_simple_config {
                    /* .size_ngram = */ ngram_size_key,
                    /* .size_mgram = */ mgram_size_value
                };
                auto state = std::make_unique<common_speculative_state_ngram_simple>(
                    /* .type  = */ config.type,
                    /* .state = */ config_simple
                );
                impls.push_back(std::move(state));
                break;
            }
            case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K:
            case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V: {
                impls.push_back(std::make_unique<common_speculative_state_ngram_map_k>(
                    config.type,
                    get_common_ngram_map(config)
                ));
                break;
            }
            case COMMON_SPECULATIVE_TYPE_NGRAM_MOD: {
                GGML_ASSERT(config.params.ngram_mod);
                impls.push_back(std::make_unique<common_speculative_state_ngram_mod>(config.type, *config.params.ngram_mod));
                break;
            }
            case COMMON_SPECULATIVE_TYPE_NGRAM_CACHE: {
                auto state = create_state_ngram_cache(
                        params.lookup_cache_static, params.lookup_cache_dynamic, config);
                impls.push_back(std::make_unique<common_speculative_state_ngram_cache>(std::move(state)));
                break;
            }
            default:
                break;
        }
    }

    if (impls.empty()) {
        LOG_WRN("%s", "no implementations specified for speculative decoding\n");
        return nullptr;
    }

    auto * result = new common_speculative {
        /* .impls = */ std::move(impls)
    };

    return result;
}

void common_speculative_free(common_speculative * spec) {
    if (spec == nullptr) {
        return;
    }

    delete spec;
}

void common_speculative_begin(common_speculative * spec, const llama_tokens & prompt) {
    if (spec == nullptr) {
        return;
    }

    for (auto & impl : spec->impls) {
        common_time_meas tm(impl->t_begin_us, !impl->gen_perf);
        impl->begin(prompt);
        impl->n_call_begin++;
    }
}

llama_tokens common_speculative_draft(
        common_speculative * spec,
        const common_params_speculative & params,
        const llama_tokens & prompt_tgt, // specified in target model vocab
        llama_token id_last) {
    llama_tokens result;

    spec->curr_impl = nullptr; // reset current implementation

    for (auto & impl : spec->impls) {
        {
            common_time_meas tm(impl->t_draft_us, !impl->gen_perf);
            impl->draft(params, prompt_tgt, id_last, result);
            impl->n_call_draft++;
        }

        if (!result.empty()) {
            LOG_DBG("%s: called impl %s, hist size = %zu, call_count = %zu, gen = %zu\n", __func__,
                    common_speculative_type_to_str(impl->type).c_str(), prompt_tgt.size(),
                    impl->n_call_draft, result.size());

            spec->curr_impl = impl.get(); // set current implementation for stats
            impl->n_gen_drafts++;
            impl->n_gen_tokens += result.size();

            break; // we have a draft, so break out of the loop and return it
        }
    }

    return result;
}

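// a minimal caller-side sketch (hypothetical usage, not taken from this file):
//
//   common_speculative_begin(spec, prompt);
//   while (generating) {
//       llama_tokens draft = common_speculative_draft(spec, params, prompt, id_last);
//       // ... verify `draft` with the target model ...
//       common_speculative_accept(spec, n_accepted); // after a non-empty draft
//   }
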
void common_speculative_accept(common_speculative * spec, uint16_t n_accepted) {
    if (n_accepted == 0) {
        return;
    }

    common_speculative_state * impl = spec->curr_impl;

    GGML_ASSERT(impl);

    {
        common_time_meas tm(impl->t_accept_us, !impl->gen_perf);

        // n_accepted > 0 is guaranteed by the early return above
        impl->n_acc_drafts++;
        impl->n_acc_tokens += n_accepted;

        impl->accept(n_accepted);
        impl->n_call_accept++;
    }
}

void common_speculative_print_stats(const common_speculative * spec) {
    if (spec == nullptr) {
        return;
    }

    for (const auto & impl : spec->impls) {
        std::string str_perf;
        if (impl->gen_perf) {
            std::ostringstream oss;
            oss << std::fixed << std::setprecision(3) << impl->t_begin_us  / 1000.0 << ", ";
            oss << std::fixed << std::setprecision(3) << impl->t_draft_us  / 1000.0 << ", ";
            oss << std::fixed << std::setprecision(3) << impl->t_accept_us / 1000.0;
            str_perf = ", dur(b,g,a) = " + oss.str() + " ms";
        } else {
            str_perf = "";
        }

        LOG_INF("statistics %s: #calls(b,g,a) = %zu %zu %zu, #gen drafts = %zu, #acc drafts = %zu, #gen tokens = %zu, #acc tokens = %zu%s\n",
                common_speculative_type_to_str(impl->type).c_str(),
                impl->n_call_begin, impl->n_call_draft, impl->n_call_accept,
                impl->n_gen_drafts,
                impl->n_acc_drafts,
                impl->n_gen_tokens,
                impl->n_acc_tokens,
                str_perf.c_str());
    }
}

// server callbacks
//

common_speculative_callback::~common_speculative_callback() = default;

// server session
//
struct common_speculative_session::impl {
    common_speculative_callback & callback;
    common_params_speculative params_spec;

    llama_context * ctx_tgt = nullptr;

    common_speculative * spec = nullptr;

    // `i_batch_dft`, the indices of the draft tokens in the main batch, is stored by the caller

    llama_tokens draft;

    // use of checkpoints in speculative mode
    bool     spec_has_ckpt       = false; // true if a checkpoint for rollback after partial speculation has been created
    uint16_t spec_ckpt_n_denials = 0;     // number of drafts not accepted at the current position (0 or 1)
    size_t   spec_ckpt_size_part = 0;     // size of the partial checkpoint

    // speculative decoding stats
    int32_t n_draft_total    = 0; // total draft tokens generated
    int32_t n_draft_accepted = 0; // draft tokens actually accepted

    impl(common_speculative_callback & callback,
            const common_params_speculative & params,
            llama_context * ctx_tgt)
        : callback(callback), params_spec(params), ctx_tgt(ctx_tgt) {
        spec = common_speculative_init(params_spec, ctx_tgt);
    }

    void begin(const llama_tokens & prompt_history) {
        common_speculative_begin(spec, prompt_history);
    }

    bool has_batch_dft() {
        return !draft.empty();
    }

    void clear_draft() {
        draft.clear();
        spec_ckpt_n_denials = 0;
    }

    llama_tokens compute_draft(
            const llama_tokens & cached_text_tokens,
            llama_token id_last,
            const int n_draft_max) {
        if (spec == nullptr) {
            // no implementation, nothing to do
            clear_draft();
            return draft;
        }

        if (n_draft_max == 0) {
            clear_draft();
            return draft;
        }
        if (params_spec.use_checkpoints && spec_ckpt_n_denials > 1) {
            // we shouldn't get two denials
            LOG_WRN("%s: #tokens=%zu, spec_ckpt_n_denials=%d, id_last=%d, #draft=%zu\n", __func__,
                    cached_text_tokens.size(), spec_ckpt_n_denials, id_last, draft.size());
            clear_draft();
            return draft;
        }

        if (spec_ckpt_n_denials == 1) {
            // there is a previous speculation which wasn't accepted in full length
            if (draft.empty()) {
                // switch to non-draft inference
                LOG_DBG("%s: draft of length 0 after denied checkpoint\n", __func__);
                return draft;
            }
            // we reuse the shortened draft of the previous speculation
            LOG_DBG("%s: reuse shortened draft, #tokens=%zu, id_last=%d, size=%zu\n", __func__,
                    cached_text_tokens.size(), id_last, draft.size());
        } else if (spec_ckpt_n_denials > 1) {
            GGML_ABORT("illegal state: spec_ckpt_n_denials = %d > 1", spec_ckpt_n_denials);
        } else {
            // call the speculative implementation to create a draft
            draft = common_speculative_draft(spec, params_spec, cached_text_tokens, id_last);
            LOG_DBG("draft: id_last=%d, #draft=%zu\n", id_last, draft.size());
            if (draft.empty()) {
                clear_draft();
                return draft;
            }
        }

        if (draft.size() > (size_t) n_draft_max) {
            LOG_WRN("draft size %d exceeds max %d, truncating\n", (int) draft.size(), n_draft_max);
            draft.resize(n_draft_max);
        }

        bool do_checkpoint = !draft.empty() && params_spec.use_checkpoints;
        if (do_checkpoint && cached_text_tokens.size() > 5 && draft.size() >= 3) {
            LOG_DBG("%s: #tokens=%zu, draft.size=%zu, n_spec_denials=%d, do_checkpoint=%s, id_last=%d, tokens=[..., %d, %d, %d], draft=[%d, %d, %d, ...]\n",
                    __func__,
                    cached_text_tokens.size(),
                    draft.size(), spec_ckpt_n_denials,
                    do_checkpoint ? "yes" : "no", id_last,
                    cached_text_tokens[cached_text_tokens.size() - 3],
                    cached_text_tokens[cached_text_tokens.size() - 2],
                    cached_text_tokens[cached_text_tokens.size() - 1],
                    draft[0], draft[1], draft[2]);
        }

        if (params_spec.n_min > (int) draft.size()) {
            LOG_DBG("ignoring small draft: %d < %d\n", (int) draft.size(), params_spec.n_min);
            clear_draft();
            return draft;
        }

        if (do_checkpoint) {
            const size_t n = callback.create_checkpoint();
            if (n == 0) {
                LOG_WRN("%s: checkpoint creation failed (#tokens=%zu)\n", __func__, cached_text_tokens.size());
                clear_draft();
                return draft;
            }
            spec_ckpt_size_part = n;
            spec_has_ckpt = true;
        }

        // add the last sampled token to the batch
        callback.batch_add_token(id_last, true);

        // add all drafted tokens to the batch
        for (size_t i = 0; i < draft.size(); i++) {
            callback.batch_add_token(draft[i], true);
        }

        return draft;
    }

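    // checkpoint protocol in short: compute_draft() snapshots the target state
    // via callback.create_checkpoint() right before queuing the draft tokens;
    // on partial acceptance, sample_and_accept() below restores that snapshot
    // so the rejected draft suffix never stays in the target context
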
    common_speculative_accept_response sample_and_accept() {
        const size_t n_draft = draft.size();

        // the accepted tokens from the speculation
        auto ids = callback.sampler_sample_and_accept_n(draft);

        LOG_DBG("%s: n_draft=%zu, ids.size=%zu\n", __func__, n_draft, ids.size());
        if (ids.size() < n_draft + 1) {
            // the main model rejected some tokens

            // we shorten the draft
            draft.resize(ids.size() - 1);
            if (spec_has_ckpt) {
                // we need to roll back to the state before sampling the draft tokens
                // (restore_checkpoint shortens the context and slot.prompt.tokens)
                const size_t n = callback.restore_checkpoint(spec_ckpt_size_part);
                LOG_DBG("%s: partial acceptance: %zu < %zu, restored checkpoint: got %zu bytes\n",
                        __func__,
                        ids.size() - 1, n_draft, n);

                // delete the checkpoint
                callback.delete_checkpoint();
                spec_has_ckpt = false;

                spec_ckpt_n_denials++;
                if (ids.size() > 1u + static_cast<std::size_t>(params_spec.n_min) && spec_ckpt_n_denials == 1) {
                    // we will do the batch again but with the shortened draft
                    return common_speculative_accept_response(std::move(ids), n_draft, true);
                }

                LOG_DBG("%s: don't accept partial draft, n_draft=%zu, ids.size=%zu\n", __func__, n_draft, ids.size());
                draft.clear();

                // use the sampled token only
                ids.resize(1);
                // drafted tokens in the prompt have been deleted in restore_checkpoint(...)

                // skip acceptance, don't calculate a new draft
                return common_speculative_accept_response{std::move(ids), 0, true};
            }
        }
        const size_t draft_size_accepted = draft.size();
        LOG_DBG("%s: draft.size=%zu, ids.size=%zu\n", __func__, draft_size_accepted, ids.size());
        common_speculative_accept(spec, draft_size_accepted);
        draft.clear();

        return common_speculative_accept_response{std::move(ids), n_draft, false};
    }

    void rewind(llama_pos p0) {
        spec_ckpt_n_denials = 0;
        if (spec_has_ckpt) {
            // delete the checkpoint
            callback.delete_checkpoint();
            spec_has_ckpt = false;
        } else {
            callback.memory_seq_rm(p0, -1);
        }
    }

    void print_stats() const {
        if (spec == nullptr) {
            return;
        }

        common_speculative_print_stats(spec);
    }

    void reset() {
        if (spec == nullptr) {
            return;
        }

        clear_draft();

        spec_has_ckpt = false;
        spec_ckpt_size_part = 0;
    }
};

common_speculative_session::common_speculative_session(
        common_speculative_callback & callback,
        const common_params_speculative & params,
        llama_context * ctx_tgt) : p_impl(new impl{callback, params, ctx_tgt}) {
}

common_speculative_session::~common_speculative_session() {
    common_speculative_free(p_impl->spec);
    delete p_impl;
}

void common_speculative_session::begin(const llama_tokens & prompt_history) {
    p_impl->begin(prompt_history);
}

bool common_speculative_session::has_batch_dft() {
    return p_impl->has_batch_dft();
}

llama_tokens common_speculative_session::compute_draft(
        const llama_tokens & prompt,
        llama_token id_last,
        int n_draft_max_slot) {
    return p_impl->compute_draft(prompt, id_last, n_draft_max_slot);
}

common_speculative_accept_response common_speculative_session::sample_and_accept() {
    return p_impl->sample_and_accept();
}

void common_speculative_session::rewind(const llama_pos p0) {
    p_impl->rewind(p0);
}

void common_speculative_session::print_stats() const {
    p_impl->print_stats();
}

void common_speculative_session::reset() {
    p_impl->reset();
}