diff --git a/hf-inference-example.py b/hf-inference-example.py
index 4575923..ec56f7c 100644
--- a/hf-inference-example.py
+++ b/hf-inference-example.py
@@ -1,4 +1,5 @@
 import llamahf
+import os
 #
 # to save memory use bfloat16 on cpu
 # import torch
@@ -9,7 +10,8 @@ MODEL = 'decapoda-research/llama-7b-hf'
 # MODEL = 'decapoda-research/llama-30b-hf'
 # MODEL = 'decapoda-research/llama-65b-hf'
-# MODEL = './trained'
+if os.path.exists('./trained'):
+    MODEL = './trained'
 
 tokenizer = llamahf.LLaMATokenizer.from_pretrained(MODEL)
 model = llamahf.LLaMAForCausalLM.from_pretrained(MODEL, low_cpu_mem_usage=True)
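
For context, a minimal sketch of what the script looks like after this change, extended with a generation pass. It assumes llamahf's LLaMATokenizer and LLaMAForCausalLM follow the standard Hugging Face tokenizer/generate/decode interface; the prompt text and sampling parameters are illustrative, not part of the example.

import os

import llamahf

# Default to the 7B hub checkpoint; prefer a locally fine-tuned model when
# ./trained exists (this is the behavior introduced by the diff above).
MODEL = 'decapoda-research/llama-7b-hf'
if os.path.exists('./trained'):
    MODEL = './trained'

tokenizer = llamahf.LLaMATokenizer.from_pretrained(MODEL)
model = llamahf.LLaMAForCausalLM.from_pretrained(MODEL, low_cpu_mem_usage=True)

# Assumption: generate() and decode() behave as in the transformers API.
prompt = "The capital of France is"                 # illustrative prompt
inputs = tokenizer(prompt, return_tensors='pt')
output_ids = model.generate(inputs['input_ids'], max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))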