impl : use 6 digits for tensor dims (#20094)
Many models have vocabulary sizes, and thus tensor shapes, with more than 5 digits (for example, Gemma 3's vocab size is 262,208). I already applied this fix to the other shape-printing path, but missed `llama_format_tensor_shape` until now. Oops.
This commit is contained in:
parent
cb8f4fa3f8
commit
c99909dd0b
|
|
@ -100,9 +100,9 @@ std::string format(const char * fmt, ...) {
|
|||
|
||||
// Format a tensor shape as a comma-separated list of right-aligned dimensions,
// e.g. {262208, 4096} -> "262208,   4096".
//
// Each dimension is printed with a field width of 6 so that vocab-sized dims
// (e.g. Gemma 3's vocab of 262,208) still line up in tabular output.
//
// @param ne  tensor dimensions; may be empty, in which case "" is returned
// @return    the formatted shape string
std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
    std::string result;
    // Scratch buffer for one formatted dimension; 32 bytes comfortably holds
    // ", " plus any padded 64-bit value. Appending to a std::string removes
    // the previous fixed 256-byte cap (which could silently truncate shapes
    // with many dimensions) and the O(n^2) strlen() re-scans.
    char buf[32];
    for (size_t i = 0; i < ne.size(); i++) {
        // Separator only between elements, not before the first one.
        snprintf(buf, sizeof(buf), "%s%6" PRId64, i > 0 ? ", " : "", ne[i]);
        result += buf;
    }
    return result;
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue