move buffer_view to llama-impl.h

Daniel Bevenius 2026-02-06 07:57:47 +01:00
parent 766e7e3876
commit ac6d09c63c
3 changed files with 14 additions and 13 deletions
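
The template struct buffer_view<T> previously lived as a nested public type of llama_context, which forced the free helper functions in the implementation file to qualify it as llama_context::buffer_view<T>. This commit moves the definition into llama-impl.h alongside the other small internal utilities and drops the qualifier at the call sites. The sketch below is not part of the commit; it only illustrates how the relocated type is used, mirroring the has_data() early-out visible in the copy_tensor_async_* helpers. Variable names are hypothetical and int stands in for llama_token so the example stays self-contained (built as C++17):

// Standalone sketch (not from the commit): wrapping existing storage in the
// relocated buffer_view and guarding work on it with has_data().
#include <cstddef>
#include <cstdio>
#include <vector>

// The struct as it appears in llama-impl.h after this commit.
template <typename T>
struct buffer_view {
    T * data;
    size_t size = 0;

    bool has_data() const {
        return data && size > 0;
    }
};

int main() {
    std::vector<int> sampled_tokens = { 42, 7, 13 }; // hypothetical sampled token ids

    // The view does not own the storage; it only points at it (C++17 aggregate init).
    buffer_view<int> sampled = { sampled_tokens.data(), sampled_tokens.size() };

    if (!sampled.has_data()) {
        return 0; // same early-out the copy_tensor_async_* helpers use
    }

    for (size_t i = 0; i < sampled.size; ++i) {
        printf("token[%zu] = %d\n", i, sampled.data[i]);
    }

    return 0;
}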


@@ -1363,7 +1363,7 @@ static std::map<llama_seq_id, uint32_t> build_seq_to_output_row(const llama_ubat
 static void copy_tensor_async_ints(
         const std::map<llama_seq_id, ggml_tensor*> & tensor_map,
-        const llama_context::buffer_view<llama_token> & sampled,
+        const buffer_view<llama_token> & sampled,
         const std::map<llama_seq_id, uint32_t> & seq_to_row,
         ggml_backend_sched_t sched) {
     if (!sampled.has_data()) {
@@ -1388,7 +1388,7 @@ static void copy_tensor_async_ints(
 static void copy_tensor_async_floats(
         const std::map<llama_seq_id, ggml_tensor*> & tensor_map,
-        const llama_context::buffer_view<float> & dst,
+        const buffer_view<float> & dst,
         size_t stride,
         std::vector<uint32_t> & counts,
         const std::map<llama_seq_id, uint32_t> & seq_to_row,
@@ -1419,7 +1419,7 @@ static void copy_tensor_async_floats(
 static void copy_tensor_async_candidates(
         const std::map<llama_seq_id, ggml_tensor*> & tensor_map,
-        const llama_context::buffer_view<llama_token> & dst,
+        const buffer_view<llama_token> & dst,
         size_t stride,
         std::vector<uint32_t> & counts,
         const std::map<llama_seq_id, uint32_t> & seq_to_row,


@@ -4,6 +4,7 @@
 #include "llama-cparams.h"
 #include "llama-graph.h"
 #include "llama-adapter.h"
+#include "llama-impl.h"
 #include "ggml-cpp.h"
 #include "ggml-opt.h"
@@ -238,16 +239,6 @@ public:
     bool set_sampler(llama_seq_id seq_id, llama_sampler * sampler);
 
-    template <typename T>
-    struct buffer_view {
-        T * data;
-        size_t size = 0;
-
-        bool has_data() const {
-            return data && size > 0;
-        }
-    };
-
 private:
     llm_graph_params graph_params(
             llm_graph_result * res,


@@ -49,6 +49,16 @@ struct time_meas {
     int64_t & t_acc;
 };
 
+template <typename T>
+struct buffer_view {
+    T * data;
+    size_t size = 0;
+
+    bool has_data() const {
+        return data && size > 0;
+    }
+};
+
 void replace_all(std::string & s, const std::string & search, const std::string & replace);
 
 // TODO: rename to llama_format ?
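
Net effect of the move: buffer_view is a non-owning view, a raw pointer plus an element count with a has_data() convenience check, and it no longer depends on llama_context. Any file that already includes llama-impl.h can pass these views to free helpers such as copy_tensor_async_ints without naming llama_context, which is why the llama_context:: qualifiers disappear from the signatures above.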