[bot] Conversion to Parquet
The parquet-converter bot has created a version of this dataset in the Parquet format. You can learn more about the advantages associated with this format in the documentation.
The Parquet files are published in the refs/convert/parquet branch.
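As a quick illustration, the converted files can be loaded straight from that branch with the datasets library; a minimal sketch, where "user/dataset" is a placeholder for this repository's actual ID:

from datasets import load_dataset

# Load the auto-converted Parquet files from the conversion branch.
# "user/dataset" is a placeholder; substitute this repository's ID.
ds = load_dataset("user/dataset", revision="refs/convert/parquet")
print(ds)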
import os
import requests
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from sentence_transformers import SentenceTransformer

# Confirm a GPU is available before loading the models onto it.
print(torch.cuda.is_available())

# Path to a local copy of the Llama 2 7B chat model in Hugging Face format.
model_id = "./llama2chat7Bhf"

# Use a separate name for the embedding model so it isn't clobbered
# when the Llama model is assigned to `model` below.
embed_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
sentences = ["This is an example sentence", "Each sentence is converted"]
def get_embedding(text):
    # Normalize newlines and guard against empty input before encoding.
    text = text.replace("\n", " ")
    if not text:
        text = "this is blank"
    return embed_model.encode(text)
text = "This is an example sentence"
result = get_embedding(text)
print(result)
tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(model_id, device_map='auto', torch_dtype=torch.float16)

# Llama 2 ships without a pad token; add one and resize the
# embedding matrix to match the new vocabulary size.
tokenizer.add_special_tokens({"pad_token": "<PAD>"})
model.resize_token_embeddings(len(tokenizer))
model_id = "sentence-transformers/all-MiniLM-L6-v2"
hf_token = "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"
api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}"
headers = {"Authorization": f"Bearer {hf_token}"}
def query(texts):
response = requests.post(api_url, headers=headers)
return response
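A quick usage sketch for the fixed helper, assuming a valid token is exported as HF_TOKEN (the expected shapes are assumptions, not output from the original run):

remote_embeddings = query(sentences)
# all-MiniLM-L6-v2 produces 384-dimensional vectors, one per input sentence.
print(len(remote_embeddings), len(remote_embeddings[0]))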
eval_prompt = """
Summarize this dialog:
A: Hi Tom, are you busy tomorrow’s afternoon?
B: I’m pretty sure I am. What’s up?
A: Can you go with me to the animal shelter?.
B: What do you want to do?
A: I want to get a puppy for my son.
B: That will make him so happy.
A: Yeah, we’ve discussed it many times. I think he’s ready now.
B: That’s good. Raising a dog is a tough issue. Like having a baby ;-)
A: I'll get him one of those little dogs.
B: One that won't grow up too big;-)
A: And eat too much;-))
B: Do you know which one he would like?
A: Oh, yes, I took him there last Monday. He showed me one that he really liked.
B: I bet you had to drag him away.
A: He wanted to take it home right away ;-).
B: I wonder what he'll name it.
A: He said he’d name it after his dead hamster – Lemmy - he's a great Motorhead fan :-)))
Summary:
"""
test_prompt = """
Answer this question:
What is Red Dragon?
Answer:
"""
q_length = len(test_prompt)  # character length of the prompt; handy for stripping it from the decoded output
model_input = tokenizer(test_prompt, padding=True, truncation=True, return_tensors="pt").to("cuda")
print(model_input)
model.eval()
with torch.no_grad():
    # Generate up to 50 new tokens for the test prompt.
    output = model.generate(**model_input, max_new_tokens=50)

# Decode the full sequence (prompt plus completion); skip_special_tokens drops the added <PAD> token.
print(tokenizer.decode(output[0], skip_special_tokens=True))
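If only the generated answer is wanted, the prompt can be sliced off before decoding. A minimal sketch, not part of the original script; it slices by token count, which is more reliable than the character-based q_length computed above:

# Decode only the newly generated tokens, skipping the prompt.
prompt_token_count = model_input["input_ids"].shape[1]
answer = tokenizer.decode(output[0][prompt_token_count:], skip_special_tokens=True)
print(answer)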