model-conversion : add warn about transformers mismatch (#18691)

This commit adds a check that compares the installed transformers
library version with the transformers version the original model was
saved with (as recorded in its config.json). The check is performed
when model verification fails and prints a warning/hint suggesting that
the user install the version of the transformers library that the model
requires.

The motivation for this change is that model verification can fail
because of differences between transformers versions, and it might not
be obvious that this is the cause of the failure. With this warning the
expected version can be identified directly, hopefully saving time when
troubleshooting verification failures.
Daniel Bevenius 2026-01-08 09:29:53 +01:00 committed by GitHub
parent df7fb92170
commit 9c142e3a2a
4 changed files with 63 additions and 10 deletions
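
When the new check fires, the hint printed by exit_with_warning (added
to common.py in the diff below) looks roughly like this; the two
version numbers here are made-up placeholders:

=====================================================================
Verification failure might be due to a transformers version mismatch:

Current transformers version: 4.57.0
Model's required version    : 4.46.1

Consider installing the version specified by the model's config:

pip install transformers==4.46.1
=====================================================================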


@@ -61,7 +61,7 @@ causal-run-converted-model:
 	@CONVERTED_MODEL="$(CONVERTED_MODEL)" ./scripts/causal/run-converted-model.sh
 
 causal-verify-logits: causal-run-original-model causal-run-converted-model
-	@./scripts/causal/compare-logits.py
+	@MODEL_PATH="$(MODEL_PATH)" ./scripts/causal/compare-logits.py
 	@MODEL_PATH="$(MODEL_PATH)" ./scripts/utils/check-nmse.py -m ${MODEL_PATH}
 
causal-run-original-embeddings:
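
Note: the only Makefile change is that causal-verify-logits now
forwards MODEL_PATH to compare-logits.py as well, where previously only
check-nmse.py received it. This lets the logits script locate the
original model's config.json when verification fails. The equivalent
direct invocation would be
MODEL_PATH=/models/my-model ./scripts/causal/compare-logits.py, with
the path being a hypothetical example pointing at the original Hugging
Face model directory.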


@@ -3,10 +3,11 @@
 import sys
 import numpy as np
 from pathlib import Path
+import os
 
 # Add utils directory to path for direct script execution
 sys.path.insert(0, str(Path(__file__).parent.parent / "utils"))
-from common import get_model_name_from_env_path, compare_tokens # type: ignore[import-not-found]
+from common import get_model_name_from_env_path, compare_tokens, exit_with_warning # type: ignore[import-not-found]
 
 def quick_logits_check(pytorch_file, llamacpp_file):
     """Lightweight sanity check before NMSE"""
@@ -38,6 +39,7 @@ def quick_logits_check(pytorch_file, llamacpp_file):
     return True
 
 def main():
+    model_path = os.environ.get('MODEL_PATH')
     model_name = get_model_name_from_env_path('MODEL_PATH')
     data_dir = Path("data")
     pytorch_file = data_dir / f"pytorch-{model_name}.bin"
@@ -62,8 +64,7 @@ def main():
     print("🔍 Token Comparison Check")
     print("=" * 40)
     if not compare_tokens(f"pytorch-{model_name}", f"llamacpp-{llamacpp_model_name}"):
-        print("\n❌ Token mismatch detected")
-        sys.exit(1)
+        exit_with_warning("\n❌ Token mismatch detected", model_path)
 
     print()
     print("🔍 GGML Model Validation for model ", model_name)
@@ -80,8 +81,7 @@ def main():
         print("   Ok to proceed with NMSE check...")
         sys.exit(0)
     else:
-        print(f"❌ NOK: Top 10 predictions don't match - generation will differ")
-        sys.exit(1)
+        exit_with_warning(f"❌ NOK: Top 10 predictions don't match - generation will differ", model_path)
 
 if __name__ == "__main__":
     main()
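
Note: compare-logits.py now reads MODEL_PATH twice:
get_model_name_from_env_path (already provided by common.py) presumably
extracts only the model name used to build the data file names, while
the raw path is kept so exit_with_warning can later find config.json.
A minimal sketch of the distinction, using a hypothetical path:

import os

# with MODEL_PATH=/models/my-model in the environment:
model_path = os.environ.get('MODEL_PATH')  # "/models/my-model", or None if unset
# get_model_name_from_env_path('MODEL_PATH') would yield just "my-model",
# which names the data files (pytorch-my-model.bin); the full path is
# what exit_with_warning needs in order to read <model_path>/config.json

If MODEL_PATH is unset, model_path is None and exit_with_warning
(defined in common.py below) just prints the failure message and exits
without attempting the version check.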


@@ -3,6 +3,9 @@
 import os
 import sys
 import torch
+import transformers
+import json
+import textwrap
 import numpy as np
 from pathlib import Path
 
@@ -243,3 +246,54 @@ def compare_tokens(original, converted, type_suffix="", output_dir="data"):
             print(f"   ... and {len(mismatches) - num_to_show} more mismatches")
         return False
 
+
+def show_version_warning(current_version, model_version):
+    if not model_version:
+        return False
+    try:
+        from packaging.version import parse, InvalidVersion
+        try:
+            return parse(current_version) < parse(model_version)
+        except InvalidVersion:
+            return current_version != model_version
+    except ImportError:
+        return current_version != model_version
+
+
+def get_model_transformers_version(model_path):
+    if not model_path:
+        return None
+    config_path = Path(model_path) / "config.json"
+    if not config_path.is_file():
+        return None
+    try:
+        with open(config_path, "r", encoding="utf-8") as f:
+            config = json.load(f)
+        return config.get("transformers_version")
+    except (IOError, json.JSONDecodeError) as e:
+        print(f"Warning: Could not read or parse {config_path}: {e}", file=sys.stderr)
+        return None
+
+
+def exit_with_warning(message, model_path):
+    print(message)
+
+    if model_path and transformers is not None:
+        model_transformers_version = get_model_transformers_version(model_path)
+        transformers_version = transformers.__version__
+        if show_version_warning(transformers_version, model_transformers_version):
+            warning_message = f"""
+            =====================================================================
+            Verification failure might be due to a transformers version mismatch:
+
+            Current transformers version: {transformers_version}
+            Model's required version    : {model_transformers_version}
+
+            Consider installing the version specified by the model's config:
+
+            pip install transformers=={model_transformers_version}
+            =====================================================================
+            """
+            print(textwrap.dedent(warning_message))
+
+    sys.exit(1)
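
Two details of the new helpers are worth spelling out.
get_model_transformers_version reads the transformers_version field
that the transformers library stamps into config.json when a model is
saved; an abridged example with hypothetical values:

{
  "architectures": ["LlamaForCausalLM"],
  "transformers_version": "4.46.1"
}

show_version_warning only reports a mismatch when the installed version
is older than the model's, and degrades to a plain string comparison
when packaging is unavailable or a version string is not PEP 440. A
quick sketch of the expected behaviour, assuming common.py is
importable as the scripts above arrange (version strings are
illustrative):

from common import show_version_warning

print(show_version_warning("4.46.0", "4.57.1"))  # True: installed < required
print(show_version_warning("4.57.1", "4.46.0"))  # False: installed is newer
print(show_version_warning("4.57.1", None))      # False: model pins no version
print(show_version_warning("4.57.1", "4.57.1"))  # False: versions match
# Without 'packaging' installed the check falls back to current != model,
# so in that case even a newer installed version triggers the warning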


@@ -7,7 +7,7 @@ import importlib
 from pathlib import Path
 from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, AutoModel
 
-from common import compare_tokens # type: ignore[import-not-found]
+from common import compare_tokens, exit_with_warning # type: ignore[import-not-found]
 
 unreleased_model_name = os.getenv('UNRELEASED_MODEL_NAME')
 
@@ -174,8 +174,7 @@ def main():
     print("=" * 70)
     data_dir = python_emb_path.parent
     if not compare_tokens(python_model_name, cpp_model_name, type_suffix="-embeddings", output_dir=str(data_dir)):
-        print("\n❌ Token mismatch detected")
-        exit(1)
+        exit_with_warning("\n❌ Token mismatch detected", args.model_path)
 
     print()
     # Single prompt detailed comparison
@@ -237,7 +236,7 @@
     elif avg_cross_sim > 0.70:
         print("⚠️ FAIR: Models have some differences")
     else:
-        print("❌ POOR: Models are significantly different")
+        exit_with_warning("❌ POOR: Models are significantly different", args.model_path)
 
 if __name__ == "__main__":
     main()
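
Note: for the embeddings script the POOR branch is also a behavioural
fix: previously it only printed and fell through, so the process could
still exit with status 0, while exit_with_warning always ends with
sys.exit(1). The token-mismatch paths already exited non-zero, so for
those the change is only the added version hint. Also, args.model_path
indicates this script receives the model path as a command-line
argument rather than through the MODEL_PATH environment variable used
by compare-logits.py.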