From ec76de8bfa6c0c04cf65c0d8a0fc14aa78f09732 Mon Sep 17 00:00:00 2001
From: randaller
Date: Sun, 19 Mar 2023 14:25:51 +0300
Subject: [PATCH] Create hf-inference-cuda-example.py

---
 hf-inference-cuda-example.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 hf-inference-cuda-example.py

diff --git a/hf-inference-cuda-example.py b/hf-inference-cuda-example.py
new file mode 100644
index 0000000..f75e33b
--- /dev/null
+++ b/hf-inference-cuda-example.py
@@ -0,0 +1,23 @@
+import llamahf
+
+# # to save memory, use bfloat16 on cpu
+# import torch
+# torch.set_default_dtype(torch.bfloat16)
+
+MODEL = 'decapoda-research/llama-7b-hf'
+# MODEL = 'decapoda-research/llama-13b-hf'
+# MODEL = 'decapoda-research/llama-30b-hf'
+# MODEL = 'decapoda-research/llama-65b-hf'
+
+# MODEL = './trained'
+
+tokenizer = llamahf.LLaMATokenizer.from_pretrained(MODEL)
+# cap GPU 0 at 6 GiB of VRAM; layers that do not fit stay in CPU RAM
+# (and spill to ./offload on disk if CPU RAM is also exhausted)
+model = llamahf.LLaMAForCausalLM.from_pretrained(MODEL, low_cpu_mem_usage=True, device_map="auto", max_memory={0: "6GiB", "cpu": "128GiB"}, offload_folder="./offload")
+
+# show which module ended up on which device
+print(model.hf_device_map)
+
+batch = tokenizer("The highest mountain in China is ", return_tensors="pt")
+print(tokenizer.decode(model.generate(batch["input_ids"].cuda(), max_length=100)[0]))