From b303ebfd92b9b55f68799b0c8662342d70e458ec Mon Sep 17 00:00:00 2001
From: randaller
Date: Sun, 19 Mar 2023 15:35:17 +0300
Subject: [PATCH] Update hf-inference-cuda-example.py

---
 hf-inference-cuda-example.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hf-inference-cuda-example.py b/hf-inference-cuda-example.py
index f75e33b..6d0ac01 100644
--- a/hf-inference-cuda-example.py
+++ b/hf-inference-cuda-example.py
@@ -1,4 +1,5 @@
 import llamahf
+import os
 from accelerate import infer_auto_device_map
 #
 # to save memory use bfloat16 on cpu
@@ -10,7 +11,8 @@ MODEL = 'decapoda-research/llama-7b-hf'
 # MODEL = 'decapoda-research/llama-30b-hf'
 # MODEL = 'decapoda-research/llama-65b-hf'
 
-# MODEL = './trained'
+if os.path.exists('./trained'):
+    MODEL = './trained'
 
 tokenizer = llamahf.LLaMATokenizer.from_pretrained(MODEL)
 model = llamahf.LLaMAForCausalLM.from_pretrained(MODEL, low_cpu_mem_usage=True, device_map="auto", offload_folder="./offload")