xzuyn commited on
Commit
b084026
1 Parent(s): e363f01

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -3,8 +3,7 @@ import gradio as gr
3
 
4
 
5
  def tokenize(input_text):
6
- llama1_tokens = llama1_tokenizer(input_text, add_special_tokens=True)["input_ids"]
7
- llama2_tokens = llama2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
8
  mistral_tokens = mistral_tokenizer(input_text, add_special_tokens=True)["input_ids"]
9
  gpt2_tokens = gpt2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
10
  gpt_neox_tokens = gpt_neox_tokenizer(input_text, add_special_tokens=True)["input_ids"]
@@ -12,12 +11,11 @@ def tokenize(input_text):
12
  phi2_tokens = phi2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
13
  t5_tokens = t5_tokenizer(input_text, add_special_tokens=True)["input_ids"]
14
 
15
- return f"LLaMa-1: {len(llama1_tokens)}\nLLaMa-2: {len(llama2_tokens)}\nMistral: {len(mistral_tokens)}\nGPT-2/GPT-J: {len(gpt2_tokens)}\nGPT-NeoX: {len(gpt_neox_tokens)}\nFalcon: {len(falcon_tokens)}\nPhi-2: {len(phi2_tokens)}\nT5: {len(t5_tokens)}"
16
 
17
 
18
  if __name__ == "__main__":
19
- llama1_tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
20
- llama2_tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-fp16")
21
  mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
22
  gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
23
  gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
 
3
 
4
 
5
  def tokenize(input_text):
6
+ llama_tokens = llama_tokenizer(input_text, add_special_tokens=True)["input_ids"]
 
7
  mistral_tokens = mistral_tokenizer(input_text, add_special_tokens=True)["input_ids"]
8
  gpt2_tokens = gpt2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
9
  gpt_neox_tokens = gpt_neox_tokenizer(input_text, add_special_tokens=True)["input_ids"]
 
11
  phi2_tokens = phi2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
12
  t5_tokens = t5_tokenizer(input_text, add_special_tokens=True)["input_ids"]
13
 
14
+ return f"LLaMa: {len(llama_tokens)}\nMistral: {len(mistral_tokens)}\nGPT-2/GPT-J: {len(gpt2_tokens)}\nGPT-NeoX: {len(gpt_neox_tokens)}\nFalcon: {len(falcon_tokens)}\nPhi-2: {len(phi2_tokens)}\nT5: {len(t5_tokens)}"
15
 
16
 
17
  if __name__ == "__main__":
18
+ llama_tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-fp16")
 
19
  mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
20
  gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
21
  gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")