Fix Qwen2.5-VL 32B model conversion: detect it via out_hidden_size == 5120 and write n_wa_pattern = 0, since the 32B variant has no fullatt_block_indexes
This commit is contained in:
parent
474933e252
commit
651752f1ae
|
|
@ -2579,6 +2579,11 @@ class Qwen2VLVisionModel(VisionModel):
|
|||
elif self.global_config['model_type'] == 'qwen2_5_vl':
|
||||
self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN25VL)
|
||||
self.gguf_writer.add_vision_use_silu(True)
|
||||
out_hidden_size = hparams.get("out_hidden_size")
|
||||
if out_hidden_size == 5120:
|
||||
# 32B model does not have n_wa_pattern, the other models do
|
||||
self.gguf_writer.add_vision_n_wa_pattern(0)
|
||||
else:
|
||||
# find n_wa_pattern (window attention pattern)
|
||||
fullatt_block_indexes = hparams.get("fullatt_block_indexes")
|
||||
assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
|
||||
|
|
|
|||
Loading…
Reference in New Issue