diff --git a/common/common.cpp b/common/common.cpp
index 0b8047c480..26250abb6c 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1195,9 +1195,6 @@ llama_context * common_init_result::context() {
 }
 
 common_sampler * common_init_result::sampler(llama_seq_id seq_id) {
-    if (pimpl->samplers.size() <= (size_t) seq_id) {
-        return nullptr;
-    }
     return pimpl->samplers[seq_id].get();
 }
 
diff --git a/tools/completion/completion.cpp b/tools/completion/completion.cpp
index d9ac94babb..7f8da2a59d 100644
--- a/tools/completion/completion.cpp
+++ b/tools/completion/completion.cpp
@@ -143,16 +143,13 @@ int main(int argc, char ** argv) {
     ctx = llama_init->context();
     model = llama_init->model();
 
-    smpl = llama_init->sampler(0);
-    if (smpl == NULL) {
-        LOG_ERR("%s: error: unable to create sampler\n", __func__);
-        return 1;
-    }
-    if (ctx == NULL) {
-        LOG_ERR("%s: error: unable to create context\n", __func__);
+    if (model == nullptr || ctx == nullptr) {
+        LOG_ERR("%s : failed to init\n", __func__);
         return 1;
     }
+
+    smpl = llama_init->sampler(0);
 
     llama_memory_t mem = llama_get_memory(ctx);
 
     const llama_vocab * vocab = llama_model_get_vocab(model);