utrobinmv committed
Commit b15d6a0
Parent(s): e2c3748

update readme

Files changed (1): README.md (+23 −9)
README.md CHANGED
@@ -7,6 +7,8 @@ tags:
 - summarization
 - text2text-generation
 - t5
+base_model:
+- utrobinmv/t5_translate_en_ru_zh_base_200
 license: apache-2.0
 widget:
 
@@ -87,8 +89,12 @@ Example resume for English:
 ```python
 from transformers import T5ForConditionalGeneration, T5Tokenizer
 
+device = 'cuda'  # or 'cpu' for inference on CPU
+
 model_name = 'utrobinmv/t5_summary_en_ru_zh_base_2048'
 model = T5ForConditionalGeneration.from_pretrained(model_name)
+model.eval()
+model.to(device)
 tokenizer = T5Tokenizer.from_pretrained(model_name)
 
 text = """Videos that say approved vaccines are dangerous and cause autism, cancer or infertility are among those that will be taken down, the company said. The policy includes the termination of accounts of anti-vaccine influencers. Tech giants have been criticised for not doing more to counter false health information on their sites. In July, US President Joe Biden said social media platforms were largely responsible for people's scepticism in getting vaccinated by spreading misinformation, and appealed for them to address the issue. YouTube, which is owned by Google, said 130,000 videos were removed from its platform since last year, when it implemented a ban on content spreading misinformation about Covid vaccines. In a blog post, the company said it had seen false claims about Covid jabs "spill over into misinformation about vaccines in general". The new policy covers long-approved vaccines, such as those against measles or hepatitis B. "We're expanding our medical misinformation policies on YouTube with new guidelines on currently administered vaccines that are approved and confirmed to be safe and effective by local health authorities and the WHO," the post said, referring to the World Health Organization."""
@@ -98,7 +104,7 @@ prefix = 'summary: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -109,7 +115,7 @@ prefix = 'summary brief: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -120,7 +126,7 @@ prefix = 'summary big: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -134,8 +140,12 @@ Example resume for Chinese text on English language:
 ```python
 from transformers import T5ForConditionalGeneration, T5Tokenizer
 
+device = 'cuda'  # or 'cpu' for inference on CPU
+
 model_name = 'utrobinmv/t5_summary_en_ru_zh_base_2048'
 model = T5ForConditionalGeneration.from_pretrained(model_name)
+model.eval()
+model.to(device)
 tokenizer = T5Tokenizer.from_pretrained(model_name)
 
 text = """在北京冬奥会自由式滑雪女子坡面障碍技巧决赛中,中国选手谷爱凌夺得银牌。祝贺谷爱凌!今天上午,自由式滑雪女子坡面障碍技巧决赛举行。决赛分三轮进行,取选手最佳成绩排名决出奖牌。第一跳,中国选手谷爱凌获得69.90分。在12位选手中排名第三。完成动作后,谷爱凌又扮了个鬼脸,甚是可爱。第二轮中,谷爱凌在道具区第三个障碍处失误,落地时摔倒。获得16.98分。网友:摔倒了也没关系,继续加油!在第二跳失误摔倒的情况下,谷爱凌顶住压力,第三跳稳稳发挥,流畅落地!获得86.23分!此轮比赛,共12位选手参赛,谷爱凌第10位出场。网友:看比赛时我比谷爱凌紧张,加油!"""
@@ -145,7 +155,7 @@ prefix = 'summary to en: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -156,7 +166,7 @@ prefix = 'summary brief to en: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -167,7 +177,7 @@ prefix = 'summary big to en: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -181,8 +191,12 @@ and Example resume for Russian:
 ```python
 from transformers import T5ForConditionalGeneration, T5Tokenizer
 
+device = 'cuda'  # or 'cpu' for inference on CPU
+
 model_name = 'utrobinmv/t5_summary_en_ru_zh_base_2048'
 model = T5ForConditionalGeneration.from_pretrained(model_name)
+model.eval()
+model.to(device)
 tokenizer = T5Tokenizer.from_pretrained(model_name)
 
 text = """Высота башни составляет 324 метра (1063 фута), примерно такая же высота, как у 81-этажного здания, и самое высокое сооружение в Париже. Его основание квадратно, размером 125 метров (410 футов) с любой стороны. Во время строительства Эйфелева башня превзошла монумент Вашингтона, став самым высоким искусственным сооружением в мире, и этот титул она удерживала в течение 41 года до завершения строительство здания Крайслер в Нью-Йорке в 1930 году. Это первое сооружение которое достигло высоты 300 метров. Из-за добавления вещательной антенны на вершине башни в 1957 году она сейчас выше здания Крайслер на 5,2 метра (17 футов). За исключением передатчиков, Эйфелева башня является второй самой высокой отдельно стоящей структурой во Франции после виадука Мийо."""
@@ -192,7 +206,7 @@ prefix = 'summary: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -203,7 +217,7 @@ prefix = 'summary brief: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -214,7 +228,7 @@ prefix = 'summary big: '
 src_text = prefix + text
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
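For quick copy-paste, the per-prefix snippets in the updated README differ only in the prefix string, so they collapse into one loop. The sketch below is assembled from the hunks above; the `torch.cuda.is_available()` fallback, the `torch.no_grad()` context, and the placeholder `text` are assumptions added here for convenience and are not part of the committed README.

```python
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Fall back to CPU when no GPU is present; the committed README
# hard-codes device = 'cuda' and asks the reader to edit it by hand.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model_name = 'utrobinmv/t5_summary_en_ru_zh_base_2048'
model = T5ForConditionalGeneration.from_pretrained(model_name)
model.eval()
model.to(device)
tokenizer = T5Tokenizer.from_pretrained(model_name)

text = "Your source article goes here."  # placeholder: any English, Russian or Chinese text

# The README demonstrates three summary lengths; the 'summary to en: ',
# 'summary brief to en: ' and 'summary big to en: ' prefixes additionally
# request cross-lingual (English) output.
for prefix in ('summary: ', 'summary brief: ', 'summary big: '):
    inputs = tokenizer(prefix + text, return_tensors="pt").to(device)
    with torch.no_grad():  # inference only, no gradient bookkeeping needed
        generated_tokens = model.generate(**inputs)
    print(prefix, tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))
```

Note that the commit moves the whole `BatchEncoding` with `.to(device)` rather than just `input_ids`; that transfers the `attention_mask` as well, which is what prevents a device-mismatch error once the model lives on the GPU.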