Update README.md
Browse files
README.md
CHANGED
@@ -40,4 +40,28 @@ The environmental-claims model is fine-tuned on the [EnvironmentalClaims](https://huggingface.co/datasets/climatebert/environmental_claims) dataset.
    url = {https://arxiv.org/abs/2209.00507},
    publisher = {arXiv},
}
```
    url = {https://arxiv.org/abs/2209.00507},
    publisher = {arXiv},
}
```

## How to use

```python
# Usage example: run the environmental-claims classifier over the test split
# of the EnvironmentalClaims dataset and print one prediction per text.
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
from transformers.pipelines.pt_utils import KeyDataset
import datasets
from tqdm.auto import tqdm

dataset_name = "climatebert/environmental_claims"
model_name = "climatebert/environmental-claims"

# If you want to use your own data, simply load them as 🤗 Datasets dataset, see https://huggingface.co/docs/datasets/loading
dataset = datasets.load_dataset(dataset_name, split="test")

model = AutoModelForSequenceClassification.from_pretrained(model_name)
# `max_len` is a deprecated tokenizer kwarg; `model_max_length` is the
# supported name for capping input length at the model's 512-token limit.
tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=512)

# device=0 selects the first GPU; use device=-1 (the default) for CPU-only.
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer, device=0)

# See https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.pipeline
# KeyDataset streams the "text" column so the pipeline can batch efficiently.
for out in tqdm(pipe(KeyDataset(dataset, "text"), padding=True, truncation=True)):
    print(out)
```