Removed a duplicate line in llama-vocab.cpp; added back the comment in llama-graph.cpp.

Yee Man Chan 2026-02-04 07:45:57 +08:00
parent 8ec5b08409
commit 82215a053b
2 changed files with 1 addition and 1 deletion

llama-graph.cpp

@@ -2207,6 +2207,7 @@ ggml_tensor * llm_graph_context::build_rs(
     ggml_tensor * output_states = get_state_rows(ctx0, states, state_copy_main);
     ggml_build_forward_expand(gf, output_states);
 
+    // copy extra states which won't be changed further (between n_seqs and n_rs)
     ggml_tensor * states_extra = ggml_get_rows(ctx0, states, state_copy_extra);
     ggml_build_forward_expand(gf,
         ggml_cpy(ctx0,

llama-vocab.cpp

@@ -2233,7 +2233,6 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         || t.first == "<|end_of_text|>" // granite
         || t.first == "<EOT>"
         || t.first == "_<EOT>"
-        || t.first == "_<EOT>"
         || t.first == "[EOT]" // Kimi-K2
         || t.first == "<end▁of▁sentence>" // DeepSeek
         || t.first == "<end_of_utterance>" // smoldocling
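The removed line is an exact duplicate of the "_<EOT>" comparison directly above it, so the change has no behavioral effect: in a short-circuiting || chain, repeating the same literal can never change the result. Below is a minimal, hypothetical sketch of the matching pattern the hunk touches (simplified, standalone names; not the actual llama_vocab code):

#include <string>

// Hypothetical helper illustrating the EOT-marker matching pattern from the hunk above.
// A repeated "_<EOT>" comparison would be redundant: if the earlier identical literal
// did not match, the duplicate cannot match either, and if it did match, evaluation
// already short-circuited.
static bool is_eot_marker(const std::string & token) {
    return token == "<|end_of_text|>"      // granite
        || token == "<EOT>"
        || token == "_<EOT>"
        || token == "[EOT]"                // Kimi-K2
        || token == "<end▁of▁sentence>"    // DeepSeek
        || token == "<end_of_utterance>";  // smoldocling
}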