llama : Changing off_t to size_t for Windows (#18204)

This commit is contained in:
Julius Tischbein 2025-12-19 15:42:46 +01:00 committed by GitHub
parent cc0a04343e
commit f99ef53d2a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 3 additions and 3 deletions

View File

@ -1086,10 +1086,10 @@ bool llama_model_loader::load_all_data(
 } else {
     // If upload_backend is valid load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
     if (upload_backend) {
-        auto offset = (off_t) weight->offs;
+        size_t offset = weight->offs;
         alignment = file->read_alignment();
-        off_t aligned_offset = offset & ~(alignment - 1);
+        size_t aligned_offset = offset & ~(alignment - 1);
-        off_t offset_from_alignment = offset - aligned_offset;
+        size_t offset_from_alignment = offset - aligned_offset;
         file->seek(aligned_offset, SEEK_SET);
// Calculate aligned read boundaries // Calculate aligned read boundaries