@@ -1,6 +1,6 @@
 import llamahf
-# to save memory use bfloat16 on cpu
+# # to save memory use bfloat16 on cpu
 # import torch
 # torch.set_default_dtype(torch.bfloat16)
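For context, a minimal sketch of the memory saving the commented-out lines refer to (this assumes a recent PyTorch build with bfloat16 support on CPU; the tensor below is illustrative only, not part of the patched file):

    import torch

    # Make bfloat16 the default floating-point dtype: newly constructed
    # tensors and module parameters then take half the memory of float32.
    torch.set_default_dtype(torch.bfloat16)

    x = torch.randn(4, 4)   # allocated as bfloat16 now
    print(x.dtype)          # torch.bfloat16

Because it changes a process-wide default, the call has to run before any model weights are constructed, which is presumably why it sits next to the imports here.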