Update README.md
README.md
CHANGED
@@ -13,28 +13,28 @@ tags:
- 🤗 [UltraInteract](https://huggingface.co/datasets/openbmb/UltraInteract)
- ## Reward Modeling
- On four public preference test sets, our UltraRM achieves SOTA over other open-source reward models.
- from transformers import PreTrainedModel,
- class LlamaRewardModel(PreTrainedModel):
- config_class =
- self.model =

@@ -67,38 +67,36 @@ class LlamaRewardModel(PreTrainedModel):
- ultrarm_template = """Human: {instruction}
- Assistant: {completion}"""
- tokenizer =
- for example in dataset:
- # Output
- @misc{
- title={
- author={Ganqu Cui and
- year={
- eprint={2310.01377},
- archivePrefix={arXiv},

# Introduction

Eurus-RM-7B is trained on a mixture of [UltraInteract](https://huggingface.co/datasets/openbmb/UltraInteract), [UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback), and [UltraSafety](https://huggingface.co/datasets/openbmb/UltraSafety), with a reward modeling objective specifically designed for reasoning: it directly increases the reward of chosen actions and decreases the reward of rejected ones.
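
The released training code is not reproduced here; purely to illustrate that idea, a loss of this shape could be written as below. The decomposition into `l_bt` and `l_dr` and the equal weighting are assumptions, not the paper's exact formulation.

```python
import torch
import torch.nn.functional as F

def reward_loss(chosen_rewards: torch.Tensor, rejected_rewards: torch.Tensor) -> torch.Tensor:
    # Bradley-Terry ranking term: prefer the chosen response over the rejected one.
    l_bt = -F.logsigmoid(chosen_rewards - rejected_rewards).mean()
    # Direct term (assumed form): additionally push chosen rewards up and
    # rejected rewards down; the paper's exact formulation may differ.
    l_dr = (rejected_rewards - chosen_rewards).mean()
    return l_bt + l_dr
```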

- Eurus-RM-7B stands out as the best 7B reward model overall and achieves similar or better performance than much larger baselines. In particular, it outperforms GPT-4 on certain tasks.
- Our training objective is beneficial in improving RM performance on hard problems and reasoning.
- UltraInteract is compatible with other datasets like UltraFeedback and UltraSafety, and mixing these datasets can balance different RM abilities.
- Eurus-RM-7B improves LLMs’ reasoning performance by a large margin through reranking; see the best-of-n sketch below.
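
Reranking here means best-of-n selection: sample several candidate responses, score each with the reward model, and keep the highest-scoring one. A minimal sketch, where `generate_candidates` and `score` are hypothetical stand-ins for your generator and for RM scoring as in the Usage section:

```python
from typing import Callable, List

def rerank_best_of_n(prompt: str,
                     generate_candidates: Callable[[str, int], List[str]],
                     score: Callable[[str, str], float],
                     n: int = 16) -> str:
    # Sample n candidates, then return the one the reward model rates highest.
    candidates = generate_candidates(prompt, n)
    return max(candidates, key=lambda response: score(prompt, response))
```
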
## Usage
```python
from transformers import PreTrainedModel, AutoModel, AutoTokenizer, MistralConfig
import torch.nn as nn
import torch
from typing import Optional, List

class EurusRewardModel(PreTrainedModel):
    config_class = MistralConfig
    def __init__(self, config):
        super().__init__(config)
        # AutoModel.from_config builds the backbone from the config;
        # from_pretrained expects a path or repo id, not a config object.
        self.model = AutoModel.from_config(config)
        self.regression_head = nn.Linear(self.config.hidden_size, 1, bias=False)

    def forward( # args are the same as LlamaForCausalLM
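        # (Forward body elided in this diff. Presumably it encodes the input
        # with the backbone and maps the final hidden state through
        # regression_head to a scalar reward -- an assumption based on the
        # class definition, not the original code.)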
        return rewards

def test(model_path):
    dataset = [ # cases in webgpt; we use the same template as Mistral-Instruct-v0.2
        {"chosen":"[INST] \"Who orders martinis \"\"shaken, not stirred\"\"?\" [/INST] Sean Connery's character, fictional British Secret Service agent James Bond, in the movie Goldfinger, stated that he preferred his martini to be \"shaken, not stirred\". [1] Some believe that Bond ordered his martini shaken because of the vodka it contained, as vodka was, for the most part, refined from potatoes (cheaper brands) which made the vodka oily. To disperse the oil, Bond ordered his martinis shaken. [2]","rejected":"[INST] \"Who orders martinis \"\"shaken, not stirred\"\"?\" [/INST] Fleming's fictional British Secret Service agent James Bond orders his martini cocktail shaken, not stirred [1]. Bond's preferences for his martini are carried over to the films, where his orders are seen in both the 1961 film Dr. No and the 2006 film Casino Royale [1, 2]. In both films, Bond's subordinates copy his order, telling the bartender to keep the fruit with their drinks [2]. However, in the 2006 film, Bond appears irritated when the bartender asks if he would like his drink shaken or stirred [2]."},
        {"chosen":"[INST] Sural relates to which part of the body? [/INST] The sural region is the muscular swelling of the back of the leg below the knee, formed chiefly by the bellies of the gastrocnemius and soleus muscles [1,2].","rejected":"[INST] Sural relates to which part of the body? [/INST] The Sural nerve runs down the side of the leg near the small saphenous vein, then passes forward below the lateral malleolus and continues on the outside of the foot as the lateral dorsal cutaneous nerve, which then communicates with the intermediate dorsal cutaneous nerve, which branches off to the side of the foot. [1]"}
    ]

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    # Load the trained weights; building the model from the config alone
    # would leave it randomly initialized.
    model = EurusRewardModel.from_pretrained(model_path)

    for example in dataset:
        inputs = tokenizer(example["chosen"], return_tensors="pt")
        chosen_reward = model(**inputs).item()
        inputs = tokenizer(example["rejected"], return_tensors="pt")
        rejected_reward = model(**inputs).item()
        # A positive difference means the RM prefers the chosen answer.
        print(chosen_reward - rejected_reward)

test("openbmb/Eurus-RM-7b")
# Output 1: 0.14470714330673218
# Output 2: 0.7317184507846832
```
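
The WebGPT cases above already embed the Mistral-Instruct-v0.2 `[INST] ... [/INST]` template. To score your own data, wrap each prompt/completion pair the same way first; `to_rm_input` below is a hypothetical helper, not part of the model card:

```python
def to_rm_input(instruction: str, completion: str) -> str:
    # Mirror the [INST] ... [/INST] template used in the test cases above.
    return f"[INST] {instruction} [/INST] {completion}"

tokenizer = AutoTokenizer.from_pretrained("openbmb/Eurus-RM-7b")
model = EurusRewardModel.from_pretrained("openbmb/Eurus-RM-7b")
inputs = tokenizer(to_rm_input("Sural relates to which part of the body?",
                               "The sural region is the back of the calf."),
                   return_tensors="pt")
print(model(**inputs).item())  # scalar reward for this single pair
```
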
## Citation
```
@misc{yuan2024advancing,
      title={Advancing LLM Reasoning Generalists with Preference Trees},
      author={Lifan Yuan and Ganqu Cui and Hanbin Wang and Ning Ding and Xingyao Wang and Jia Deng and Boji Shan and Huimin Chen and Ruobing Xie and Yankai Lin and Zhenghao Liu and Bowen Zhou and Hao Peng and Zhiyuan Liu and Maosong Sun},
      year={2024},
      eprint={2404.02078},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```