|
--- |
|
license: cc-by-nc-4.0 |
|
language: |
|
- ro |
|
base_model: |
|
- OpenLLM-Ro/RoMistral-7b-Instruct-2024-10-09 |
|
datasets: |
|
- OpenLLM-Ro/ro_dpo_helpsteer |
|
model-index: |
|
- name: OpenLLM-Ro/RoMistral-7b-Instruct-DPO-2024-10-09 |
|
results: |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: RoMT-Bench |
|
type: RoMT-Bench |
|
metrics: |
|
- name: Score |
|
type: Score |
|
value: 5.88 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: RoCulturaBench |
|
type: RoCulturaBench |
|
metrics: |
|
- name: Score |
|
type: Score |
|
value: 4.72 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: Romanian_Academic_Benchmarks |
|
type: Romanian_Academic_Benchmarks |
|
metrics: |
|
- name: Average accuracy |
|
type: accuracy |
|
value: 51.95 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_arc_challenge |
|
type: OpenLLM-Ro/ro_arc_challenge |
|
metrics: |
|
- name: Average accuracy |
|
type: accuracy |
|
value: 50.73 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_mmlu |
|
type: OpenLLM-Ro/ro_mmlu |
|
metrics: |
|
- name: Average accuracy |
|
type: accuracy |
|
value: 47.88 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_winogrande |
|
type: OpenLLM-Ro/ro_winogrande |
|
metrics: |
|
- name: Average accuracy |
|
type: accuracy |
|
value: 68.41 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_hellaswag |
|
type: OpenLLM-Ro/ro_hellaswag |
|
metrics: |
|
- name: Average accuracy |
|
type: accuracy |
|
value: 62.27 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_gsm8k |
|
type: OpenLLM-Ro/ro_gsm8k |
|
metrics: |
|
- name: Average accuracy |
|
type: accuracy |
|
value: 32.27 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_truthfulqa |
|
type: OpenLLM-Ro/ro_truthfulqa |
|
metrics: |
|
- name: Average accuracy |
|
type: accuracy |
|
value: 50.12 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: LaRoSeDa_binary |
|
type: LaRoSeDa_binary |
|
metrics: |
|
- name: Average macro-f1 |
|
type: macro-f1 |
|
value: 82.13 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: LaRoSeDa_multiclass |
|
type: LaRoSeDa_multiclass |
|
metrics: |
|
- name: Average macro-f1 |
|
type: macro-f1 |
|
value: 65.24 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: LaRoSeDa_binary_finetuned |
|
type: LaRoSeDa_binary_finetuned |
|
metrics: |
|
- name: Average macro-f1 |
|
type: macro-f1 |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: LaRoSeDa_multiclass_finetuned |
|
type: LaRoSeDa_multiclass_finetuned |
|
metrics: |
|
- name: Average macro-f1 |
|
type: macro-f1 |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: WMT_EN-RO |
|
type: WMT_EN-RO |
|
metrics: |
|
- name: Average bleu |
|
type: bleu |
|
value: 26.25 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: WMT_RO-EN |
|
type: WMT_RO-EN |
|
metrics: |
|
- name: Average bleu |
|
type: bleu |
|
value: 6.09 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: WMT_EN-RO_finetuned |
|
type: WMT_EN-RO_finetuned |
|
metrics: |
|
- name: Average bleu |
|
type: bleu |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: WMT_RO-EN_finetuned |
|
type: WMT_RO-EN_finetuned |
|
metrics: |
|
- name: Average bleu |
|
type: bleu |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: XQuAD |
|
type: XQuAD |
|
metrics: |
|
- name: Average exact_match |
|
type: exact_match |
|
value: 23.40 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: XQuAD |
|
type: XQuAD |
|
metrics: |
|
- name: Average f1 |
|
type: f1 |
|
value: 45.80 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: XQuAD_finetuned |
|
type: XQuAD_finetuned |
|
metrics: |
|
- name: Average exact_match |
|
type: exact_match |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: XQuAD_finetuned |
|
type: XQuAD_finetuned |
|
metrics: |
|
- name: Average f1 |
|
type: f1 |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: STS |
|
type: STS |
|
metrics: |
|
- name: Average spearman |
|
type: spearman |
|
value: 77.33 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: STS |
|
type: STS |
|
metrics: |
|
- name: Average pearson |
|
type: pearson |
|
value: 76.60 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: STS_finetuned |
|
type: STS_finetuned |
|
metrics: |
|
- name: Average spearman |
|
type: spearman |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: STS_finetuned |
|
type: STS_finetuned |
|
metrics: |
|
- name: Average pearson |
|
type: pearson |
|
value: 0.00 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: RoMT-Bench |
|
type: RoMT-Bench |
|
metrics: |
|
- name: First turn |
|
type: Score |
|
value: 6.44 |
|
- name: Second turn |
|
type: Score |
|
value: 5.33 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_arc_challenge |
|
type: OpenLLM-Ro/ro_arc_challenge |
|
metrics: |
|
- name: 0-shot |
|
type: accuracy |
|
value: 51.67 |
|
- name: 1-shot |
|
type: accuracy |
|
value: 45.59 |
|
- name: 3-shot |
|
type: accuracy |
|
value: 48.24 |
|
- name: 5-shot |
|
type: accuracy |
|
value: 50.21 |
|
- name: 10-shot |
|
type: accuracy |
|
value: 54.07 |
|
- name: 25-shot |
|
type: accuracy |
|
value: 54.58 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_mmlu |
|
type: OpenLLM-Ro/ro_mmlu |
|
metrics: |
|
- name: 0-shot |
|
type: accuracy |
|
value: 40.86 |
|
- name: 1-shot |
|
type: accuracy |
|
value: 48.67 |
|
- name: 3-shot |
|
type: accuracy |
|
value: 51.26 |
|
- name: 5-shot |
|
type: accuracy |
|
value: 50.75 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_winogrande |
|
type: OpenLLM-Ro/ro_winogrande |
|
metrics: |
|
- name: 0-shot |
|
type: accuracy |
|
value: 64.80 |
|
- name: 1-shot |
|
type: accuracy |
|
value: 68.19 |
|
- name: 3-shot |
|
type: accuracy |
|
value: 70.09 |
|
- name: 5-shot |
|
type: accuracy |
|
value: 70.56 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_hellaswag |
|
type: OpenLLM-Ro/ro_hellaswag |
|
metrics: |
|
- name: 0-shot |
|
type: accuracy |
|
value: 61.96 |
|
- name: 1-shot |
|
type: accuracy |
|
value: 60.88 |
|
- name: 3-shot |
|
type: accuracy |
|
value: 61.86 |
|
- name: 5-shot |
|
type: accuracy |
|
value: 62.73 |
|
- name: 10-shot |
|
type: accuracy |
|
value: 63.93 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: OpenLLM-Ro/ro_gsm8k |
|
type: OpenLLM-Ro/ro_gsm8k |
|
metrics: |
|
- name: 1-shot |
|
type: accuracy |
|
value: 23.28 |
|
- name: 3-shot |
|
type: accuracy |
|
value: 34.95 |
|
- name: 5-shot |
|
type: accuracy |
|
value: 38.59 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: LaRoSeDa_binary |
|
type: LaRoSeDa_binary |
|
metrics: |
|
- name: 0-shot |
|
type: macro-f1 |
|
value: 34.36 |
|
- name: 1-shot |
|
type: macro-f1 |
|
value: 97.87 |
|
- name: 3-shot |
|
type: macro-f1 |
|
value: 98.40 |
|
- name: 5-shot |
|
type: macro-f1 |
|
value: 97.90 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: LaRoSeDa_multiclass |
|
type: LaRoSeDa_multiclass |
|
metrics: |
|
- name: 0-shot |
|
type: macro-f1 |
|
value: 66.17 |
|
- name: 1-shot |
|
type: macro-f1 |
|
value: 65.93 |
|
- name: 3-shot |
|
type: macro-f1 |
|
value: 61.86 |
|
- name: 5-shot |
|
type: macro-f1 |
|
value: 66.99 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: WMT_EN-RO |
|
type: WMT_EN-RO |
|
metrics: |
|
- name: 0-shot |
|
type: bleu |
|
value: 18.43 |
|
- name: 1-shot |
|
type: bleu |
|
value: 28.25 |
|
- name: 3-shot |
|
type: bleu |
|
value: 29.45 |
|
- name: 5-shot |
|
type: bleu |
|
value: 28.88 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: WMT_RO-EN |
|
type: WMT_RO-EN |
|
metrics: |
|
- name: 0-shot |
|
type: bleu |
|
value: 2.80 |
|
- name: 1-shot |
|
type: bleu |
|
value: 2.90 |
|
- name: 3-shot |
|
type: bleu |
|
value: 6.63 |
|
- name: 5-shot |
|
type: bleu |
|
value: 12.04 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: XQuAD_EM |
|
type: XQuAD_EM |
|
metrics: |
|
- name: 0-shot |
|
type: exact_match |
|
value: 5.04 |
|
- name: 1-shot |
|
type: exact_match |
|
value: 22.44 |
|
- name: 3-shot |
|
type: exact_match |
|
value: 30.42 |
|
- name: 5-shot |
|
type: exact_match |
|
value: 35.71 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: XQuAD_F1 |
|
type: XQuAD_F1 |
|
metrics: |
|
- name: 0-shot |
|
type: f1 |
|
value: 23.36 |
|
- name: 1-shot |
|
type: f1 |
|
value: 44.63 |
|
- name: 3-shot |
|
type: f1 |
|
value: 54.78 |
|
- name: 5-shot |
|
type: f1 |
|
value: 60.43 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: STS_Spearman |
|
type: STS_Spearman |
|
metrics: |
|
- name: 1-shot |
|
type: spearman |
|
value: 73.38 |
|
- name: 3-shot |
|
type: spearman |
|
value: 78.93 |
|
- name: 5-shot |
|
type: spearman |
|
value: 79.68 |
|
- task: |
|
type: text-generation |
|
dataset: |
|
name: STS_Pearson |
|
type: STS_Pearson |
|
metrics: |
|
- name: 1-shot |
|
type: pearson |
|
value: 73.93 |
|
- name: 3-shot |
|
type: pearson |
|
value: 77.69 |
|
- name: 5-shot |
|
type: pearson |
|
value: 78.17 |
|
|
|
--- |
|
|
|
# Model Card for RoMistral-7b-Instruct-DPO
|
|
|
|
|
|
This repository points to, and is identical to, [RoMistral-7b-Instruct-DPO-2024-10-09](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-DPO-2024-10-09).
|
|
|
|
|
|
|
RoMistral is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **human-aligned instruct 7B model**. Links to the other models can be found at the bottom of this page.
|
|
|
## Model Details |
|
|
|
### Model Description |
|
|
|
|
OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro has developed and publicly released a collection of Romanian LLMs, both as foundational models and as instruct and chat variants.
|
|
|
|
|
- **Developed by:** OpenLLM-Ro |
|
|
- **Language(s):** Romanian |
|
- **License:** cc-by-nc-4.0 |
|
- **Finetuned from model:** [RoMistral-7b-Instruct-2024-10-09](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-2024-10-09) |
|
- **Trained using:** [RoHelpSteer](https://huggingface.co/datasets/OpenLLM-Ro/ro_dpo_helpsteer) |
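
This checkpoint was produced by running DPO on top of RoMistral-7b-Instruct-2024-10-09 with the RoHelpSteer preference data. As an illustration only, the broad shape of such a stage can be sketched with the TRL library. This is a minimal sketch, not the exact recipe used for this model: it assumes a recent `trl` release, assumes the dataset exposes the standard `prompt`/`chosen`/`rejected` columns, and uses placeholder hyperparameters.

```python
# Illustrative DPO sketch with TRL -- NOT the exact recipe used for this model;
# the hyperparameters below are placeholders.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "OpenLLM-Ro/RoMistral-7b-Instruct-2024-10-09"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)

# Preference pairs; assumed to follow the prompt/chosen/rejected schema.
dataset = load_dataset("OpenLLM-Ro/ro_dpo_helpsteer", split="train")

config = DPOConfig(
    output_dir="romistral-7b-instruct-dpo",
    beta=0.1,                        # strength of the KL penalty (placeholder)
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
)

# With no explicit ref_model, TRL derives the frozen reference model from `model`.
trainer = DPOTrainer(
    model=model,
    args=config,
    train_dataset=dataset,
    processing_class=tokenizer,      # named `tokenizer=` in older trl releases
)
trainer.train()
```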
|
|
|
|
|
|
|
|
### Model Sources |
|
|
|
|
|
|
- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory |
|
- **Paper:** https://arxiv.org/abs/2406.18266 |
|
|
|
## Intended Use |
|
|
|
### Intended Use Cases |
|
|
|
RoMistral is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.
|
|
|
### Out-of-Scope Use |
|
|
|
|
|
|
Use in any manner that violates the license or any applicable laws or regulations, and use in languages other than Romanian, are out of scope.
|
|
|
|
|
|
|
## How to Get Started with the Model |
|
|
|
Use the code below to get started with the model. |
|
|
|
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model weights from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoMistral-7b-Instruct-DPO")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoMistral-7b-Instruct-DPO")

# "What board games can I play with my friends?"
instruction = "Ce jocuri de societate pot juca cu prietenii mei?"
chat = [
    {"role": "user", "content": instruction},
]
# Render the conversation with the model's chat template; `system_message`
# is a template variable and is left empty here.
prompt = tokenizer.apply_chat_template(chat, tokenize=False, system_message="")

# Tokenize the rendered prompt and generate up to 128 new tokens.
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
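
For faster inference, the model can also be loaded in half precision directly onto a GPU. This is a minimal variant of the loading step, assuming a CUDA device with sufficient memory and the `accelerate` package installed:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Half-precision GPU loading; device_map="auto" requires `accelerate`.
tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoMistral-7b-Instruct-DPO")
model = AutoModelForCausalLM.from_pretrained(
    "OpenLLM-Ro/RoMistral-7b-Instruct-DPO",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
```

When loading this way, move the encoded prompt to the model's device before generating, e.g. `inputs = inputs.to(model.device)`.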
|
|
|
## Academic Benchmarks |
|
|
|
|
|
<table> |
|
<tbody> |
|
<tr> |
|
<td><strong>Model</strong></td> |
|
<td><strong><center>Average</center></strong></td> |
|
<td><strong><center>ARC</center></strong></td> |
|
<td><strong><center>MMLU</center></strong></td> |
|
<td><strong><center>Winogrande</center></strong></td> |
|
<td><strong><center>Hellaswag</center></strong></td> |
|
<td><strong><center>GSM8k</center></strong></td> |
|
<td><strong><center>TruthfulQA</center></strong></td> |
|
</tr> |
|
<tr> |
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>47.40</center></td><td><center>46.29</center></td><td><center>47.00</center></td><td><center>58.78</center></td><td><center>54.27</center></td><td><center>13.47</center></td><td><center><strong>64.59</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>52.54</center></td><td><center>50.41</center></td><td><center><strong>51.61</strong></center></td><td><center>66.48</center></td><td><center>60.27</center></td><td><center><strong>34.19</strong></center></td><td><center>52.30</center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center><strong>52.91</strong></center></td><td><center><strong>52.27</strong></center></td><td><center>49.33</center></td><td><center><strong>70.03</strong></center></td><td><center><strong>62.88</strong></center></td><td><center>32.42</center></td><td><center>50.51</center></td> |
|
</tr> |
|
<tr> |
|
<td><em>RoMistral-7b-Instruct-DPO-2024-10-09</em></td><td><center><em>51.95</em></center></td><td><center><em>50.73</em></center></td><td><center><em>47.88</em></center></td><td><center><em>68.41</em></center></td><td><center><em>62.27</em></center></td><td><center><em>32.27</em></center></td><td><center><em>50.12</em></center></td> |
|
</tr> |
|
</tbody> |
|
</table> |
|
|
|
## Downstream Tasks
|
|
|
<table> |
|
<tbody> |
|
<tr> |
|
<td></td> |
|
<td colspan="4"><center><strong>LaRoSeDa</strong></center></td> |
|
<td colspan="4"><center><strong>WMT</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td></td> |
|
<td colspan="2"><center><strong>Few-shot</strong></center></td> |
|
<td colspan="2"><center><strong>Finetuned</strong></center></td> |
|
<td colspan="2"><center><strong>Few-shot</strong></center></td> |
|
<td colspan="2"><center><strong>Finetuned</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td><strong>Model</strong></td> |
|
<td><center><strong>Binary<br>(Macro F1)</strong></center></td> |
|
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td> |
|
<td><center><strong>Binary<br>(Macro F1)</strong></center></td> |
|
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td> |
|
<td><center><strong>EN-RO<br>(BLEU)</strong></center></td>

<td><center><strong>RO-EN<br>(BLEU)</strong></center></td>

<td><center><strong>EN-RO<br>(BLEU)</strong></center></td>

<td><center><strong>RO-EN<br>(BLEU)</strong></center></td>
|
</tr> |
|
<tr> |
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>96.97</center></td><td><center>56.66</center></td><td><center>98.83</center></td><td><center>87.32</center></td><td><center>18.60</center></td><td><center><strong>33.99</strong></center></td><td><center>26.19</center></td><td><center>39.88</center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center><strong>97.36</strong></center></td><td><center>67.55</center></td><td><center>98.80</center></td><td><center><strong>88.28</strong></center></td><td><center>27.93</center></td><td><center>13.21</center></td><td><center><strong>28.72</strong></center></td><td><center><strong>40.86</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>95.56</center></td><td><center><strong>67.83</strong></center></td><td><center><strong>99.00</strong></center></td><td><center>87.57</center></td><td><center><strong>28.28</strong></center></td><td><center>6.10</center></td><td><center>27.70</center></td><td><center>40.36</center></td> |
|
</tr> |
|
<tr> |
|
<td><em>RoMistral-7b-Instruct-DPO-2024-10-09</em></td><td><center><em>82.13</em></center></td><td><center><em>65.24</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>26.25</em></center></td><td><center><em>6.09</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td> |
|
</tr> |
|
</tbody> |
|
</table> |
|
|
|
|
|
<table> |
|
<tbody> |
|
<tr> |
|
<td></td> |
|
<td colspan="4"><center><strong>XQuAD</strong></center></td> |
|
<td colspan="4"><center><strong>STS</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td></td> |
|
<td colspan="2"><center><strong>Few-shot</strong></center></td> |
|
<td colspan="2"><center><strong>Finetuned</strong></center></td> |
|
<td colspan="2"><center><strong>Few-shot</strong></center></td> |
|
<td colspan="2"><center><strong>Finetuned</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td><strong>Model</strong></td> |
|
<td><center><strong>(EM)</strong></center></td> |
|
<td><center><strong>(F1)</strong></center></td> |
|
<td><center><strong>(EM)</strong></center></td> |
|
<td><center><strong>(F1)</strong></center></td> |
|
<td><center><strong>(Spearman)</strong></center></td> |
|
<td><center><strong>(Pearson)</strong></center></td> |
|
<td><center><strong>(Spearman)</strong></center></td> |
|
<td><center><strong>(Pearson)</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>27.92</center></td><td><center>50.71</center></td><td><center><strong>65.46</strong></center></td><td><center><strong>79.73</strong></center></td><td><center>62.62</center></td><td><center>60.86</center></td><td><center>84.92</center></td><td><center>85.44</center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center><strong>43.66</strong></center></td><td><center><strong>63.70</strong></center></td><td><center>55.04</center></td><td><center>72.31</center></td><td><center>77.43</center></td><td><center><strong>78.43</strong></center></td><td><center>87.25</center></td><td><center>87.79</center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>41.09</center></td><td><center>63.21</center></td><td><center>47.56</center></td><td><center>62.69</center></td><td><center><strong>78.47</strong></center></td><td><center>77.24</center></td><td><center><strong>87.28</strong></center></td><td><center><strong>87.88</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td><em>RoMistral-7b-Instruct-DPO-2024-10-09</em></td><td><center><em>23.40</em></center></td><td><center><em>45.80</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>77.33</em></center></td><td><center><em>76.60</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td> |
|
</tr> |
|
</tbody> |
|
</table> |
|
|
|
|
|
## MT-Bench |
|
|
|
<table> |
|
<tbody> |
|
<tr> |
|
<td><strong>Model</strong></td> |
|
<td><strong><center>Average</center></strong></td> |
|
<td><strong><center>1st turn</center></strong></td> |
|
<td><strong><center>2nd turn</center></strong></td> |
|
<td><strong><center>Answers in Ro</center></strong></td> |
|
</tr> |
|
<tr> |
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>5.03</center></td><td><center>5.05</center></td><td><center>5.00</center></td><td><center>154/160</center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>4.99</center></td><td><center>5.46</center></td><td><center>4.53</center></td><td><center><strong>160/160</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>5.29</center></td><td><center>5.86</center></td><td><center>4.72</center></td><td><center><strong>160/160</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td><em>RoMistral-7b-Instruct-DPO-2024-10-09</em></td><td><center><em><strong>5.88</strong></em></center></td><td><center><em><strong>6.44</strong></em></center></td><td><center><em><strong>5.33</strong></em></center></td><td><center><em><strong>160/160</strong></em></center></td> |
|
</tr> |
|
</tbody> |
|
</table> |
|
|
|
|
|
## RoCulturaBench |
|
|
|
<table> |
|
<tbody> |
|
<tr> |
|
<td><strong>Model</strong></td> |
|
<td><strong><center>Average</center></strong></td> |
|
<td><strong><center>Answers in Ro</center></strong></td> |
|
</tr> |
|
<tr> |
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>3.68</center></td><td><center>97/100</center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>3.38</center></td><td><center><strong>100/100</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>3.99</center></td><td><center><strong>100/100</strong></center></td> |
|
</tr> |
|
<tr> |
|
<td><em>RoMistral-7b-Instruct-DPO-2024-10-09</em></td><td><center><em><strong>4.72</strong></em></center></td><td><center><em><strong>100/100</strong></em></center></td> |
|
</tr> |
|
</tbody> |
|
</table> |
|
|
|
|
|
|
|
|
|
## RoMistral Model Family |
|
|
|
| Model | Link | |
|
|--------------------|:--------:| |
|
|RoMistral-7b-Instruct-2024-05-17| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-2024-05-17) | |
|
|RoMistral-7b-Instruct-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-2024-10-09) | |
|
|*RoMistral-7b-Instruct-DPO-2024-10-09*| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-DPO-2024-10-09) | |
|
|
|
|
|
## Citation |
|
|
|
``` |
|
@misc{masala2024vorbecstiromanecsterecipetrain, |
|
title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions}, |
|
author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea}, |
|
year={2024}, |
|
eprint={2406.18266}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CL}, |
|
url={https://arxiv.org/abs/2406.18266}, |
|
} |
|
``` |
|