Skip to content

Commit 492e168

Browse files
committed
Fix load_compress_model tokenizer retry: replace bitwise `~use_fast` (which maps True→-2 and False→-1, both truthy) with logical `not use_fast`
1 parent 587d5cf commit 492e168

1 file changed

Lines changed: 1 addition & 1 deletion

File tree

fastchat/model/compression.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ def load_compress_model(model_path, device, torch_dtype, use_fast, revision="mai
115115
)
116116
except TypeError:
117117
tokenizer = AutoTokenizer.from_pretrained(
118-
model_path, use_fast=~use_fast, revision=revision, trust_remote_code=True
118+
model_path, use_fast=not use_fast, revision=revision, trust_remote_code=True
119119
)
120120
with init_empty_weights():
121121
# `trust_remote_code` should be set as `True` for both AutoConfig and AutoModel

0 commit comments

Comments (0)