From 73e61d5b755f371864f928afafa31ffc0c15a008 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 16 Feb 2026 10:30:10 +0200
Subject: [PATCH] rename

---
 examples/llama-eval/README.md                            | 2 +-
 examples/llama-eval/{llama-eval-new.py => llama-eval.py} | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)
 rename examples/llama-eval/{llama-eval-new.py => llama-eval.py} (100%)

diff --git a/examples/llama-eval/README.md b/examples/llama-eval/README.md
index 1c96cc6a1f..89408db823 100644
--- a/examples/llama-eval/README.md
+++ b/examples/llama-eval/README.md
@@ -14,7 +14,7 @@ Simple evaluation tool for llama.cpp with support for multiple datasets.
 ## Usage
 
 ```bash
-python llama-eval-new.py \
+python llama-eval.py \
     --server http://127.0.0.1:8013 \
     --model gpt-oss-20b-hf-low \
     --judge-model gpt-oss-20b-hf-medium \
diff --git a/examples/llama-eval/llama-eval-new.py b/examples/llama-eval/llama-eval.py
similarity index 100%
rename from examples/llama-eval/llama-eval-new.py
rename to examples/llama-eval/llama-eval.py
index eacbe3d887..7396261bff 100755
--- a/examples/llama-eval/llama-eval-new.py
+++ b/examples/llama-eval/llama-eval.py
@@ -460,15 +460,15 @@ class Processor:
         print(f"\nProcessing {n_cases} {self.dataset_type.upper()} questions...")
         print(f"Server: {self.server_url} (model: {self.model_name})")
-        print(f"Threads: {self.threads}")
-        print(f"Max tokens: {self.n_predict}")
-        print(f"Seed: {self.seed}")
-        print(f"Sampling: temp={self.sampling_config.get('temperature', 'skip')}, top-k={self.sampling_config.get('top_k', 'skip')}, top-p={self.sampling_config.get('top_p', 'skip')}, min-p={self.sampling_config.get('min_p', 'skip')}")
         print(f"Grader: {self.grader.grader_type}", end="")
         if self.grader.grader_type == "llm":
             judge_model = self.judge_model_name if self.judge_model_name else self.model_name
             print(f" (judge server: {self.judge_server_url}, model: {judge_model})", end="")
         print()
+        print(f"Threads: {self.threads}")
+        print(f"Max tokens: {self.n_predict}")
+        print(f"Seed: {self.seed}")
+        print(f"Sampling: temp={self.sampling_config.get('temperature', 'skip')}, top-k={self.sampling_config.get('top_k', 'skip')}, top-p={self.sampling_config.get('top_p', 'skip')}, min-p={self.sampling_config.get('min_p', 'skip')}")
 
         print()
 
         dataset_size = len(self.dataset.questions)