Skip to content

Commit

Permalink
Rename a file, add deprecation warning for --load-in-4bit
Browse files Browse the repository at this point in the history
  • Loading branch information
oobabooga committed Mar 14, 2023
1 parent 3da73e4 commit 265ba38
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 1 deletion.
File renamed without changes.
2 changes: 1 addition & 1 deletion modules/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ def load_model(model_name):

# Quantized model
elif shared.args.gptq_bits > 0:
from modules.quant_loader import load_quantized
from modules.GPTQ_loader import load_quantized

model = load_quantized(model_name)

Expand Down
6 changes: 6 additions & 0 deletions modules/shared.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@ def str2bool(v):
# UI / device / precision flags (parser is built earlier in this module).
parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
# Deprecated alias kept for backward compatibility; a shim after parse_args()
# maps it onto --gptq-bits 4 and prints a warning.
parser.add_argument('--load-in-4bit', action='store_true', help='DEPRECATED: use --gptq-bits 4 instead.')
parser.add_argument('--gptq-bits', type=int, default=0, help='Load a pre-quantized model with specified precision. 2, 3, 4 and 8bit are supported. Currently only works with LLaMA and OPT.')
parser.add_argument('--gptq-model-type', type=str, help='Model type of pre-quantized model. Currently only LLaMa and OPT are supported.')
parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
Expand All @@ -95,3 +96,8 @@ def str2bool(v):
parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
# Parse once at import time; other modules read the result via shared.args
# (e.g. shared.args.gptq_bits in modules/models.py).
args = parser.parse_args()

# Provisional, this will be deleted later
# Backward-compatibility shim: the deprecated --load-in-4bit flag is mapped
# onto the equivalent --gptq-bits 4 setting so existing launch commands keep
# working, while warning the user to migrate.
if args.load_in_4bit:
    print("Warning: --load-in-4bit is deprecated and will be removed. Use --gptq-bits 4 instead.\n")
    args.gptq_bits = 4

0 comments on commit 265ba38

Please sign in to comment.