Speed up inference on nvidia 10 series on Linux.
comfyanonymous committed Sep 1, 2024
1 parent b643eae commit 8d31a66
Showing 1 changed file with 7 additions and 2 deletions.
9 changes: 7 additions & 2 deletions comfy/model_management.py
@@ -370,8 +370,10 @@ def offloaded_memory(loaded_models, device):
         offloaded_mem += m.model_offloaded_memory()
     return offloaded_mem
 
+WINDOWS = any(platform.win32_ver())
+
 EXTRA_RESERVED_VRAM = 400 * 1024 * 1024
-if any(platform.win32_ver()):
+if WINDOWS:
     EXTRA_RESERVED_VRAM = 600 * 1024 * 1024 #Windows is higher because of the shared vram issue
 
 if args.reserve_vram is not None:
@@ -1002,7 +1004,10 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma
     nvidia_10_series = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200", "p5000", "p5200", "p6000", "1060", "1050", "p40", "p100", "p6", "p4"]
     for x in nvidia_10_series:
         if x in props.name.lower():
-            return True
+            if WINDOWS or manual_cast:
+                return True
+            else:
+                return False #weird linux behavior where fp32 is faster
 
     if manual_cast:
         free_model_memory = maximum_vram_for_weights(device)
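
For context, the first hunk introduces a module-level WINDOWS flag and keeps the larger VRAM reservation for Windows only: platform.win32_ver() returns a tuple of empty strings on every platform except Windows, so any(...) is True only there, and the extra reservation is 400 MiB by default versus 600 MiB on Windows. The snippet below is a minimal standalone sketch of that behavior, not the surrounding model_management.py code.

import platform

# platform.win32_ver() is ('', '', '', '') on non-Windows systems,
# so any(...) is True only when running on Windows.
WINDOWS = any(platform.win32_ver())

EXTRA_RESERVED_VRAM = 400 * 1024 * 1024  # 400 MiB reserved by default
if WINDOWS:
    # Windows reserves more because of the shared-VRAM issue noted in the diff.
    EXTRA_RESERVED_VRAM = 600 * 1024 * 1024  # 600 MiB

print(f"Reserving {EXTRA_RESERVED_VRAM // (1024 * 1024)} MiB of VRAM")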
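
The second hunk is the actual speed-up named in the commit message: Pascal (10-series) cards previously always opted into fp16 at this point; after this change they only do so on Windows or when weights are manually cast, since fp32 is reportedly faster on Linux for these cards. Below is a simplified sketch of that decision; pascal_should_use_fp16 is a hypothetical standalone helper, and the real should_use_fp16 checks further conditions before and after this branch.

import platform

WINDOWS = any(platform.win32_ver())

NVIDIA_10_SERIES = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200",
                    "p5000", "p5200", "p6000", "1060", "1050", "p40", "p100", "p6", "p4"]

def pascal_should_use_fp16(device_name: str, manual_cast: bool) -> bool:
    """Decide fp16 use for a 10-series card, mirroring the changed branch."""
    name = device_name.lower()
    if any(x in name for x in NVIDIA_10_SERIES):
        # Previously this branch always returned True; now fp16 is used only on
        # Windows or when manual casting is active, because fp32 is faster on
        # Linux for Pascal according to the comment in the diff.
        return WINDOWS or manual_cast
    # Not a 10-series card; the real function goes on to check other conditions.
    return False

# Example: a GTX 1080 on Linux without manual casting now prefers fp32.
print(pascal_should_use_fp16("NVIDIA GeForce GTX 1080", manual_cast=False))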
