fix: remove unnecessary fp32 / fp16 handling

This commit is contained in:
Manuel Schmid 2024-01-26 02:06:17 +01:00
parent 90be73a6df
commit d515d0f074
No known key found for this signature in database
GPG Key ID: 32C4F7569B40B84B
1 changed file with 0 additions and 6 deletions

View File

@ -18,7 +18,6 @@ class GroundingDinoModel(Model):
self.model = None
self.load_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.offload_device = torch.device('cpu')
self.dtype = torch.float32
def predict_with_caption(
self,
@ -36,14 +35,9 @@ class GroundingDinoModel(Model):
self.load_device = model_management.text_encoder_device()
self.offload_device = model_management.text_encoder_offload_device()
self.dtype = torch.float32
model.to(self.offload_device)
if model_management.should_use_fp16(device=self.load_device):
model.half()
self.dtype = torch.float16
self.model = ModelPatcher(model, load_device=self.load_device, offload_device=self.offload_device)
model_management.load_model_gpu(self.model)