From b701d2d7759545c8d4b3515f8013cd68a7a175c0 Mon Sep 17 00:00:00 2001
From: randaller <randaller@users.noreply.github.com>
Date: Sun, 19 Mar 2023 11:34:16 +0300
Subject: [PATCH] Add files via upload

---
 hf-inference-example.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 hf-inference-example.py

diff --git a/hf-inference-example.py b/hf-inference-example.py
new file mode 100644
index 0000000..af59c80
--- /dev/null
+++ b/hf-inference-example.py
@@ -0,0 +1,21 @@
+import torch
+
+import llamahf
+
+# Hugging Face model repo to load; swap in a larger checkpoint if you have the RAM.
+MODEL = 'decapoda-research/llama-7b-hf'
+# MODEL = 'decapoda-research/llama-13b-hf'
+# MODEL = 'decapoda-research/llama-30b-hf'
+# MODEL = 'decapoda-research/llama-65b-hf'
+
+tokenizer = llamahf.LLaMATokenizer.from_pretrained(MODEL)
+# low_cpu_mem_usage avoids holding a second full copy of the weights while loading.
+# from_pretrained already materializes the model on CPU, so no .to('cpu') is needed.
+model = llamahf.LLaMAForCausalLM.from_pretrained(MODEL, low_cpu_mem_usage=True)
+model.eval()
+
+batch = tokenizer("The highest mountain in China is ", return_tensors="pt")
+# inference_mode() disables autograd bookkeeping: less memory, faster generation.
+with torch.inference_mode():
+    output_ids = model.generate(batch["input_ids"], max_length=256)
+print(tokenizer.decode(output_ids[0]))