---
language:
- en
dataset_info:
features:
- name: prompt
dtype: string
- name: answer
dtype: string
splits:
- name: spell
num_bytes: 465046
num_examples: 1000
- name: spell_inverse
num_bytes: 446046
num_examples: 1000
- name: contains_char
num_bytes: 424176
num_examples: 1000
- name: contains_word
num_bytes: 530494
num_examples: 1000
- name: orth
num_bytes: 607118
num_examples: 1000
- name: sem
num_bytes: 608098
num_examples: 1000
- name: ins_char
num_bytes: 560474
num_examples: 1000
- name: ins_word
num_bytes: 775597
num_examples: 1000
- name: del_char
num_bytes: 513247
num_examples: 1000
- name: del_word
num_bytes: 689114
num_examples: 1000
- name: sub_char
num_bytes: 532364
num_examples: 1000
- name: sub_word
num_bytes: 743529
num_examples: 1000
- name: swap_char
num_bytes: 470394
num_examples: 1000
- name: swap_word
num_bytes: 675168
num_examples: 1000
download_size: 962103
dataset_size: 8040865
configs:
- config_name: default
data_files:
- split: spell
path: data/spell-*
- split: spell_inverse
path: data/spell_inverse-*
- split: contains_char
path: data/contains_char-*
- split: contains_word
path: data/contains_word-*
- split: orth
path: data/orth-*
- split: sem
path: data/sem-*
- split: ins_char
path: data/ins_char-*
- split: ins_word
path: data/ins_word-*
- split: del_char
path: data/del_char-*
- split: del_word
path: data/del_word-*
- split: sub_char
path: data/sub_char-*
- split: sub_word
path: data/sub_word-*
- split: swap_char
path: data/swap_char-*
- split: swap_word
path: data/swap_word-*
---
# CUTE
Here is the CUTE benchmark, a benchmark designed to test LLMs' ability to understand the characters within their tokens.
To use this dataset as we did, make sure to apply `tokenizer.apply_chat_template` to the prompt, and then append `Answer: "` to the resulting string.
For smaller LLMs, you may need to do some post-processing on the final answer.
You can also check out our implementation at: https://github.com/Leukas/cute