arnocandel committed on
Commit eec5f6e
1 Parent(s): 8f10eef

commit files to HF hub

Files changed (1)
  1. README.md +18 -5
README.md CHANGED
@@ -44,8 +44,7 @@ import torch
 from transformers import pipeline, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-oasst1-falcon-40b", padding_side="left")
-generate_text = pipeline(model="h2oai/h2ogpt-oasst1-falcon-40b", tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", prompt_type='human_bot')
-
+generate_text = pipeline(model="h2oai/h2ogpt-oasst1-falcon-40b", tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", prompt_type="human_bot")
 res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
 print(res[0]["generated_text"])
 ```
@@ -60,7 +59,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-oasst1-falcon-40b", padding_side="left")
 model = AutoModelForCausalLM.from_pretrained("h2oai/h2ogpt-oasst1-falcon-40b", torch_dtype=torch.bfloat16, device_map="auto")
-generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer, prompt_type='human_bot')
+generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer, prompt_type="human_bot")
 
 res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
 print(res[0]["generated_text"])
@@ -145,9 +144,23 @@ RWConfig {
 Model validation results using [EleutherAI lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness).
 
 
-TBD
-
+[eval source code](https://github.com/h2oai/h2ogpt/issues/216#issuecomment-1579573101)
+
+|    Task     |Version| Metric |Value |   |Stderr|
+|-------------|------:|--------|-----:|---|-----:|
+|arc_challenge|      0|acc     |0.5196|±  |0.0146|
+|             |       |acc_norm|0.5461|±  |0.0145|
+|arc_easy     |      0|acc     |0.8190|±  |0.0079|
+|             |       |acc_norm|0.7799|±  |0.0085|
+|boolq        |      1|acc     |0.8514|±  |0.0062|
+|hellaswag    |      0|acc     |0.6485|±  |0.0048|
+|             |       |acc_norm|0.8314|±  |0.0037|
+|openbookqa   |      0|acc     |0.3860|±  |0.0218|
+|             |       |acc_norm|0.4880|±  |0.0224|
+|piqa         |      0|acc     |0.8194|±  |0.0090|
+|             |       |acc_norm|0.8335|±  |0.0087|
+|winogrande   |      0|acc     |0.7751|±  |0.0117|
 
 
 ## Disclaimer
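
The second hunk uses `H2OTextGenerationPipeline` without showing where it is imported from. A minimal self-contained sketch of the updated usage follows, assuming the class is provided by `h2oai_pipeline.py` from the h2ogpt repository (an assumption, not shown in this diff):

```python
# Sketch under assumptions: H2OTextGenerationPipeline is assumed to come from
# h2oai_pipeline.py in the h2ogpt repo (https://github.com/h2oai/h2ogpt), placed
# next to this script; the rest mirrors the updated lines in the diff above.
import torch
from h2oai_pipeline import H2OTextGenerationPipeline  # assumed import path
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-oasst1-falcon-40b", padding_side="left")
model = AutoModelForCausalLM.from_pretrained(
    "h2oai/h2ogpt-oasst1-falcon-40b", torch_dtype=torch.bfloat16, device_map="auto"
)
generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer, prompt_type="human_bot")

res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
print(res[0]["generated_text"])
```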
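The new results table comes from EleutherAI's lm-evaluation-harness; the exact invocation is in the linked issue. As a rough sketch only, a zero-shot run over the same tasks with a recent harness release could look like the following (the harness version, `dtype`, and few-shot settings here are assumptions, not the recorded configuration):

```python
# Rough sketch, not the authors' exact command: evaluate the model on the tasks
# from the table using lm-evaluation-harness (assumes a recent release that
# exposes lm_eval.simple_evaluate; see the linked GitHub issue for the real setup).
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",  # Hugging Face causal-LM backend
    model_args="pretrained=h2oai/h2ogpt-oasst1-falcon-40b,dtype=bfloat16",  # dtype is an assumption
    tasks=["arc_challenge", "arc_easy", "boolq", "hellaswag",
           "openbookqa", "piqa", "winogrande"],
    num_fewshot=0,  # assumption; the issue documents the actual settings
)
print(results["results"])  # per-task acc / acc_norm / stderr, comparable to the table
```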