fixed flake8 lint issues

Saba Fallah 2025-12-05 12:24:10 +01:00
parent 5f2ee1aecf
commit 1c88647ec6
3 changed files with 15 additions and 10 deletions


@@ -6005,6 +6005,7 @@ class Gemma3VisionModel(MmprojModel):
         return []  # skip other tensors
 
+
 @ModelBase.register("DeepseekOCRForCausalLM")
 class DeepseekOCRVisionModel(MmprojModel):
     def set_gguf_parameters(self):
@@ -6044,7 +6045,6 @@ class DeepseekOCRVisionModel(MmprojModel):
         return vision_config
 
-
     def tensor_force_quant(self, name, new_name, bid, n_dims):
         # TODO: increase numerical stability. maybe delete later.
         return gguf.GGMLQuantizationType.F32
@@ -7354,8 +7354,12 @@ class DeepseekV2Model(TextModel):
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         # skip vision tensors and remove "language_model." for Kimi-VL
-        if "vision_" in name or "multi_modal_projector" in name \
-                or "image_newline" in name or "model.projector" in name or "sam_model" in name or "view_seperator" in name:
+        if ("vision_" in name
+                or "multi_modal_projector" in name
+                or "image_newline" in name
+                or "model.projector" in name
+                or "sam_model" in name
+                or "view_seperator" in name):
             return []
 
         if name.startswith("language_model."):
@@ -7435,6 +7439,7 @@ class DeepseekV2Model(TextModel):
         if len(experts) > 0:
             raise ValueError(f"Unprocessed experts: {experts}")
 
+
 @ModelBase.register("MiniMaxM2ForCausalLM")
 class MiniMaxM2Model(TextModel):
     model_arch = gguf.MODEL_ARCH.MINIMAXM2
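
Note on the DeepseekV2Model hunk above: the rewrite is the standard PEP 8 fix that flake8 nudges you toward, replacing a backslash-continued boolean chain (which easily overruns the line-length limit, E501) with an implicitly continued, parenthesized condition. A minimal standalone sketch of the same pattern; the function name and marker strings below are illustrative, not taken from the commit:

# Illustrative sketch only, not part of the commit.
def should_skip(name: str) -> bool:
    # Parenthesized multi-line condition, mirroring the diff; flake8
    # accepts this where a "\"-continued long line would trip E501.
    if ("vision_" in name
            or "multi_modal_projector" in name
            or "image_newline" in name):
        return True
    # An equivalent, often tidier form:
    # return any(m in name for m in ("vision_", "multi_modal_projector", "image_newline"))
    return False

print(should_skip("model.vision_tower.blocks.0"))   # True
print(should_skip("model.layers.0.mlp.gate_proj"))  # False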