mponty committed on
Commit 7f914e8 · verified · 1 Parent(s): c8484ea

Upload 2 files

Files changed (2)
  1. app.py +140 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,140 @@
+ ### Import Libraries ###
+ import streamlit as st
+ import itertools
+ from word_piece_tokenizer import WordPieceTokenizer
+ import tiktoken
+
+ from nltk.tokenize import TreebankWordTokenizer, wordpunct_tokenize, TweetTokenizer
+
+ ### User Interface ###
+ st.title("Tokenization")
+
+ st.write(
+     """Tokenization is the first step of many natural language processing tasks. A tokenizer breaks text down into smaller pieces,
+ called tokens. A token can be an entire word or a sub-word made of a sequence of characters. Once the tokens are created, they are
+ mapped to numerical IDs so that a model can process them. The choice of tokenizer affects both the speed and the quality of your results. When working with a large language model (LLM),
+ use the same tokenizer the model was trained with to ensure compatibility."""
+ )
+
+ txt = st.text_area("Paste text to tokenize", max_chars=1000)
+
+ tokenizer = st.selectbox(
+     "Tokenizer",
+     (
+         "White Space",
+         "Penn Treebank (NLTK Default)",
+         "Tweet Tokenizer (NLTK)",
+         "WordPiece (BERT)",
+         "Byte Pair Encoding (OpenAI GPT-4o)",
+     ),
+     index=None,
+     placeholder="Select a tokenizer",
+ )
+
+ token_id = st.checkbox("Translate tokens into IDs", value=False)
+
+ ### Helper Functions ###
+
+
+ def white_space_tokenizer(txt):
+     return txt.split()
+
+
+ def treebank_tokenizer(txt):
+     return TreebankWordTokenizer().tokenize(txt)
+
+
+ ## Write tokenized output to screen ##
+
+ # Output colors to cycle through
+ colors = ["blue", "green", "orange", "red", "violet"]
+ color = itertools.cycle(colors)
+
+
+ # Stream tokens to the screen, cycling through background colors
+ def stream_data():
+     for token in split_tokens:
+         yield f":{next(color)}-background[{token}] "
+
+
+ def unique_list(token_list):
+     token_set = set(token_list)
+     return list(token_set)
+
+
+ def stream_token_ids():
+     st.write(f"Unique tokens: {len(unique_tokens)}")
+     for token in split_tokens:
+         yield f":{next(color)}-background[{unique_tokens.index(token)}] "
+
+
+ def stream_wp_token_ids():
+     st.write(f"Unique tokens: {len(unique_list(ids))}")
+     for id in ids:
+         yield f":{next(color)}-background[{id}] "
+
+
+ ### Tokenizer Descriptions ###
+
+ white_space_desc = """A basic word-level tokenizer that splits text on white space. It is simple and fast, but it does not handle punctuation or special characters."""
+ treebank_desc = """The Penn Treebank tokenizer is the default word-level tokenizer in the Natural Language Toolkit (NLTK). It is a more advanced tokenizer that handles punctuation and special characters."""
+ tweet_desc = """The TweetTokenizer is a specialized word-level tokenizer designed for text from social media platforms. It handles hashtags, mentions, and emojis."""
+ wordpiece_desc = """WordPiece is a sub-word tokenizer used in BERT and other transformer models. It breaks words down into smaller sub-word units, which helps with rare or out-of-vocabulary words."""
+ bpe_desc = """Byte Pair Encoding (BPE) is a sub-word tokenizer used in models such as OpenAI's GPT-4o. It breaks words down into sub-word units based on the frequency of character pairs in the training text."""
+
+ # Tokenize the input with the selected tokenizer and stream the result
+
+ ## Tokenizer Selection ##
+
+ if tokenizer == "White Space":
+     with st.expander("About White Space Tokenizer"):
+         st.write(white_space_desc)
+     split_tokens = white_space_tokenizer(txt)
+     st.write_stream(stream_data)
+     if token_id:
+         color = itertools.cycle(colors)
+         unique_tokens = unique_list(split_tokens)
+         st.write_stream(stream_token_ids)
+
+ elif tokenizer == "Penn Treebank (NLTK Default)":
+     with st.expander("About Penn Treebank Tokenizer"):
+         st.write(treebank_desc)
+     split_tokens = TreebankWordTokenizer().tokenize(txt)
+     st.write_stream(stream_data)
+     if token_id:
+         color = itertools.cycle(colors)
+         unique_tokens = unique_list(split_tokens)
+         st.write_stream(stream_token_ids)
+
+ elif tokenizer == "Tweet Tokenizer (NLTK)":
+     with st.expander("About Tweet Tokenizer"):
+         st.write(tweet_desc)
+     split_tokens = TweetTokenizer().tokenize(txt)
+     st.write_stream(stream_data)
+     if token_id:
+         color = itertools.cycle(colors)
+         unique_tokens = unique_list(split_tokens)
+         st.write_stream(stream_token_ids)
+
+ elif tokenizer == "WordPiece (BERT)":
+     with st.expander("About WordPiece Tokenizer"):
+         st.write(wordpiece_desc)
+     ids = WordPieceTokenizer().tokenize(txt)
+     split_tokens = WordPieceTokenizer().convert_ids_to_tokens(ids)
+     st.write_stream(stream_data)
+     if token_id:
+         color = itertools.cycle(colors)
+         st.write_stream(stream_wp_token_ids)
+
+ elif tokenizer == "Byte Pair Encoding (OpenAI GPT-4o)":
+     with st.expander("About Byte Pair Encoding (BPE)"):
+         st.write(bpe_desc)
+     encoding = tiktoken.encoding_for_model("gpt-4o")
+     ids = encoding.encode(txt)
+     split_tokens = [
+         encoding.decode_single_token_bytes(id).decode("utf-8") for id in ids
+     ]
+     st.write_stream(stream_data)
+     if token_id:
+         color = itertools.cycle(colors)
+         st.write_stream(stream_wp_token_ids)
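For readers who want to see what these tokenizers produce without launching the Streamlit UI, here is a minimal offline sketch that mirrors the word-level and BPE branches of app.py using the same libraries (NLTK and tiktoken). The sample sentence is arbitrary, and the exact splits and IDs may vary with library versions.

# Minimal offline sketch (assumes nltk and tiktoken are installed); no Streamlit required.
from nltk.tokenize import TreebankWordTokenizer, TweetTokenizer
import tiktoken

sample = "LLMs don't see words, they see tokens!"

# Word-level tokenizers: white space vs. Penn Treebank vs. TweetTokenizer.
print(sample.split())
print(TreebankWordTokenizer().tokenize(sample))
print(TweetTokenizer().tokenize(sample))

# BPE maps the text to integer token IDs; each ID decodes back to a byte sequence.
enc = tiktoken.encoding_for_model("gpt-4o")
ids = enc.encode(sample)
print(ids)
print([enc.decode_single_token_bytes(i) for i in ids])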
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ nltk
+ word-piece-tokenizer
+ tiktoken
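To try the app outside of a Hugging Face Space, a local run might look like the following. Note that streamlit itself is not listed in requirements.txt; on Spaces it is presumably provided via the Space's SDK setting, so for a local run it has to be installed explicitly.

pip install -r requirements.txt streamlit
streamlit run app.py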