@@ -17,7 +17,7 @@ tokenizer = llamahf.LLaMATokenizer.from_pretrained(MODEL)
 model = llamahf.LLaMAForCausalLM.from_pretrained(MODEL).cpu()
 
 if tokenizer.pad_token is None:
-    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
+    tokenizer.add_special_tokens({'pad_token': '<|endoftext|>'})
     model.resize_token_embeddings(len(tokenizer))
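For context, a minimal sketch of the patched block in use. The `MODEL` path and the example prompts are placeholders; only the `llamahf` calls themselves come from the diff.

```python
import llamahf

MODEL = "./llama-7b-hf"  # hypothetical path to a converted checkpoint

tokenizer = llamahf.LLaMATokenizer.from_pretrained(MODEL)
model = llamahf.LLaMAForCausalLM.from_pretrained(MODEL).cpu()

if tokenizer.pad_token is None:
    # '<|endoftext|>' is not in the base LLaMA vocabulary, so this adds a
    # new special token; the embedding matrix must then grow to match the
    # enlarged vocabulary, hence the resize call.
    tokenizer.add_special_tokens({'pad_token': '<|endoftext|>'})
    model.resize_token_embeddings(len(tokenizer))

# With a pad token defined, batch encoding with padding works as usual:
batch = tokenizer(["Hello", "A longer prompt"], padding=True, return_tensors="pt")
```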