Spaces:
Runtime error
Runtime error
upload
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- LICENSE +21 -0
- README.md +64 -12
- app.py +425 -0
- demo/ckpts/controlnet/cldm_v15.yaml +79 -0
- demo/ckpts/controlnet/control_v11p_sd15_openpose.pth +3 -0
- demo/ckpts/controlnet/idle +0 -0
- demo/ckpts/controlnet/v1-5-pruned-emaonly.safetensors +3 -0
- demo/ckpts/gligen/diffusion_pytorch_model_box.bin +3 -0
- demo/ckpts/gligen/idle +0 -0
- demo/ckpts/visorgpt/idle +0 -0
- demo/ckpts/visorgpt/visorgpt_dagger_ta_tb.pt +3 -0
- requirements.txt +160 -0
- train/README.md +1 -0
- train/__init__.py +1 -0
- train/__pycache__/__init__.cpython-38.pyc +0 -0
- train/__pycache__/__init__.cpython-39.pyc +0 -0
- train/beginning.txt +1 -0
- train/corpora/CLUECorpusSmall_bert_sampled.txt +0 -0
- train/corpora/CLUECorpusSmall_sampled.txt +0 -0
- train/corpora/book_review.txt +0 -0
- train/corpora/book_review_bert.txt +0 -0
- train/corpora/book_review_cls.txt +0 -0
- train/datasets/book_review/dev.tsv +0 -0
- train/datasets/book_review/test.tsv +0 -0
- train/datasets/book_review/test_nolabel.tsv +0 -0
- train/datasets/book_review/train.tsv +0 -0
- train/datasets/test_data/book_review/dev.tsv +201 -0
- train/datasets/test_data/book_review/test.tsv +201 -0
- train/datasets/test_data/book_review/test_nolabel.tsv +201 -0
- train/datasets/test_data/book_review/train.tsv +501 -0
- train/documents/llama.md +52 -0
- train/finetune/run_c3.py +215 -0
- train/finetune/run_chid.py +225 -0
- train/finetune/run_classifier.py +366 -0
- train/finetune/run_classifier_cv.py +173 -0
- train/finetune/run_classifier_deepspeed.py +212 -0
- train/finetune/run_classifier_grid.py +120 -0
- train/finetune/run_classifier_mt.py +203 -0
- train/finetune/run_classifier_multi_label.py +287 -0
- train/finetune/run_classifier_prompt.py +308 -0
- train/finetune/run_classifier_siamese.py +340 -0
- train/finetune/run_cmrc.py +447 -0
- train/finetune/run_dbqa.py +232 -0
- train/finetune/run_image_classifier.py +195 -0
- train/finetune/run_ner.py +339 -0
- train/finetune/run_regression.py +199 -0
- train/finetune/run_simcse.py +274 -0
- train/finetune/run_speech2text.py +311 -0
- train/finetune/run_text2text.py +314 -0
- train/inference/run_c3_infer.py +94 -0
LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) 2023 Jinheng Xie
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE.
|
README.md
CHANGED
@@ -1,13 +1,65 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.32.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<div align=center>
|
2 |
+
<img src="visorgpt_title.png" width="400">
|
3 |
+
</div>
|
4 |
+
|
5 |
+
## Learning Visual Prior via Generative Pre-Training [[Arxiv](http://arxiv.org/abs/2305.13777)] [[Demo]()] [[Video](https://www.youtube.com/watch?v=8FDoBfxSY8I)]
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
+
<img src="demo.gif" width="1000">
|
8 |
+
|
9 |
+
## Updates
|
10 |
+
- Gradio demo is available.
|
11 |
+
- [Hugging Face demo will be available]().
|
12 |
+
|
13 |
+
## Quick Start
|
14 |
+
### Step 1
|
15 |
+
```
|
16 |
+
# clone the repo
|
17 |
+
git clone https://github.com/Sierkinhane/VisorGPT.git
|
18 |
+
|
19 |
+
# go to directory
|
20 |
+
cd VisorGPT
|
21 |
+
|
22 |
+
# create a new environment
|
23 |
+
conda create -n visorgpt python=3.8
|
24 |
+
|
25 |
+
# activate the new environment
|
26 |
+
conda activate visorgpt
|
27 |
+
|
28 |
+
# prepare the basic environments
|
29 |
+
pip3 install -r requirements.txt
|
30 |
+
|
31 |
+
# install controlnet and gligen
|
32 |
+
cd demo/ControlNet
|
33 |
+
pip3 install -v -e .
|
34 |
+
cd ../demo/GLIGEN
|
35 |
+
pip3 install -v -e .
|
36 |
+
```
|
37 |
+
### Step 2 - Download pre-trained weights
|
38 |
+
Download [visorgpt](https://drive.google.com/file/d/1Pk4UPNKBMH-0uRLmK5COYTca7FUrN8XY/view?usp=share_link), [controlnet-pose2img](https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_openpose.pth), [controlnet-sd](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors), [gligen-bbox2img](https://huggingface.co/gligen/gligen-generation-text-box/blob/main/diffusion_pytorch_model.bin), and put them as follow:
|
39 |
+
```
|
40 |
+
├── demo/
|
41 |
+
| ├── ckpts
|
42 |
+
| | ├── controlnet
|
43 |
+
| | | ├── control_v11p_sd15_openpose.pth
|
44 |
+
| | | ├── v1-5-pruned-emaonly.safetensors
|
45 |
+
| | ├── gligen
|
46 |
+
| | | ├── diffusion_pytorch_model_box.bin
|
47 |
+
| | ├── visorgpt
|
48 |
+
| | | ├── visorgpt_dagger_ta_tb.pt
|
49 |
+
```
|
50 |
+
|
51 |
+
### Step 3 - Run demo
|
52 |
+
```
|
53 |
+
CUDA_VISIBLE_DEVICES=0 python3 gradio_demo.py
|
54 |
+
```
|
55 |
+
|
56 |
+
If you are using our code, please consider citing our paper.
|
57 |
+
|
58 |
+
```
|
59 |
+
@article{xie2023visorgpt,
|
60 |
+
title={VisorGPT: Learning Visual Prior via Generative Pre-Training},
|
61 |
+
author={Xie, Jinheng and Ye, Kai and Li, Yudong and Li, Yuexiang and Lin, Kevin Qinghong and Zheng, Yefeng and Shen, Linlin and Shou, Mike Zheng},
|
62 |
+
journal={arXiv preprint arXiv:2305.13777},
|
63 |
+
year={2023}
|
64 |
+
}
|
65 |
+
```
|
app.py
ADDED
@@ -0,0 +1,425 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from share import *
|
2 |
+
import gradio as gr
|
3 |
+
import numpy as np
|
4 |
+
import torch
|
5 |
+
import re
|
6 |
+
from PIL import Image
|
7 |
+
from tqdm import tqdm
|
8 |
+
from train.scripts.generate_lm_multiple import gen_sequence, build_visorgpt
|
9 |
+
from utils.seq2coord import gen_cond_mask
|
10 |
+
from gligen.gligen_inference_box import gligen_infer, build_gligen_model
|
11 |
+
from controlnet.gradio_pose2image_v2 import control_infer, build_control_model, build_controlv11_model
|
12 |
+
|
13 |
+
# init models
|
14 |
+
visorgpt_config_path = 'train/models/gpt2/config.json'
|
15 |
+
visorgpt_model_path = 'demo/ckpts/visorgpt/visorgpt_dagger_ta_tb.pt'
|
16 |
+
visorgpt_vocab_path = 'train/models/google_uncased_en_coord_vocab.txt'
|
17 |
+
|
18 |
+
# control_model_path = 'demo/ckpts/controlnet/control_sd15_openpose.pth'
|
19 |
+
control_model_path = 'demo/ckpts/controlnet/control_v11p_sd15_openpose.pth' # v1.1
|
20 |
+
control_sd_path = 'demo/ckpts/controlnet/v1-5-pruned-emaonly.safetensors'
|
21 |
+
control_model_config = 'demo/ckpts/controlnet/cldm_v15.yaml'
|
22 |
+
|
23 |
+
gligen_model_path = 'demo/ckpts/gligen/diffusion_pytorch_model_box.bin'
|
24 |
+
|
25 |
+
|
26 |
+
visorgpt_args, visorgpt_model = build_visorgpt(model_config=visorgpt_config_path,
|
27 |
+
model_path=visorgpt_model_path,
|
28 |
+
vocab_path=visorgpt_vocab_path)
|
29 |
+
control_model, ddim_sampler = build_controlv11_model(model_path=control_model_path,
|
30 |
+
sd_path=control_sd_path,
|
31 |
+
config_path=control_model_config)
|
32 |
+
|
33 |
+
# build gligen model
|
34 |
+
g_model, g_autoencoder, g_text_encoder, g_diffusion, \
|
35 |
+
g_config, g_grounding_tokenizer_input = build_gligen_model(ckpt=gligen_model_path)
|
36 |
+
|
37 |
+
|
38 |
+
# maximum number of instances
|
39 |
+
max_num_keypoint = 16
|
40 |
+
max_num_bbox = 16
|
41 |
+
max_num_mask = 8
|
42 |
+
|
43 |
+
def generate_sequence(gen_type,
|
44 |
+
data_type,
|
45 |
+
instance_size,
|
46 |
+
num_instance,
|
47 |
+
object_name_inbox):
|
48 |
+
|
49 |
+
ctn = True
|
50 |
+
|
51 |
+
if gen_type == 'key point':
|
52 |
+
num_keypoint = 18
|
53 |
+
if num_instance > max_num_keypoint:
|
54 |
+
num_instance = max_num_keypoint
|
55 |
+
|
56 |
+
seq_prompt = '; '.join([gen_type, data_type, instance_size, str(num_instance), str(num_keypoint)]) + ' ; [person'
|
57 |
+
|
58 |
+
elif gen_type == 'box' or gen_type == 'mask':
|
59 |
+
|
60 |
+
if not object_name_inbox.strip():
|
61 |
+
if gen_type == 'mask':
|
62 |
+
object_name_inbox = "bottle; cup"
|
63 |
+
else:
|
64 |
+
if data_type == 'object centric':
|
65 |
+
object_name_inbox = "great white shark"
|
66 |
+
else:
|
67 |
+
object_name_inbox = "person; frisbee"
|
68 |
+
|
69 |
+
num_keypoint = 0
|
70 |
+
|
71 |
+
if gen_type == 'mask':
|
72 |
+
if num_instance > max_num_mask:
|
73 |
+
num_instance = max_num_mask
|
74 |
+
if gen_type == 'box':
|
75 |
+
if num_instance > max_num_bbox:
|
76 |
+
num_instance = max_num_bbox
|
77 |
+
|
78 |
+
if data_type == 'object centric':
|
79 |
+
num_instance = 1
|
80 |
+
|
81 |
+
objects = ', '.join(object_name_inbox.strip().split(";"))
|
82 |
+
seq_prompt = '; '.join([gen_type, data_type, instance_size,
|
83 |
+
str(num_instance), str(num_keypoint)]) + '; ' + objects
|
84 |
+
|
85 |
+
if len(object_name_inbox.split(';')) > num_instance:
|
86 |
+
return {
|
87 |
+
raw_sequence: gr.update(
|
88 |
+
value="The umber of category names should be less than the number of instances, please try again :)",
|
89 |
+
visible=True)
|
90 |
+
}
|
91 |
+
|
92 |
+
print("input prompt: \n", seq_prompt)
|
93 |
+
sequence = gen_sequence(visorgpt_args, visorgpt_model, seq_prompt)
|
94 |
+
assert isinstance(sequence, list)
|
95 |
+
|
96 |
+
try:
|
97 |
+
cond_mask, cond_json = gen_cond_mask(sequence, ctn)
|
98 |
+
if gen_type == 'key point':
|
99 |
+
ori_sequence = cond_json[2]['sequences'][0][0] + '[SEP]'
|
100 |
+
elif gen_type == 'box':
|
101 |
+
ori_sequence = cond_json[0]['sequences'][0][0] + '[SEP]'
|
102 |
+
elif gen_type == 'mask':
|
103 |
+
ori_sequence = cond_json[1]['sequences'][0][0] + '[SEP]'
|
104 |
+
except:
|
105 |
+
cond_mask, cond_json = gen_cond_mask(sequence, not ctn)
|
106 |
+
if gen_type == 'key point':
|
107 |
+
ori_sequence = cond_json[2]['sequences'][0][0] + '[SEP]'
|
108 |
+
elif gen_type == 'box':
|
109 |
+
ori_sequence = cond_json[0]['sequences'][0][0] + '[SEP]'
|
110 |
+
elif gen_type == 'mask':
|
111 |
+
ori_sequence = cond_json[1]['sequences'][0][0] + '[SEP]'
|
112 |
+
|
113 |
+
ret_img = Image.fromarray(cond_mask)
|
114 |
+
|
115 |
+
if not gen_type == 'mask':
|
116 |
+
return {
|
117 |
+
result_gallery: [ret_img],
|
118 |
+
raw_sequence: gr.update(value=ori_sequence, visible=True),
|
119 |
+
images_button: gr.update(visible=True),
|
120 |
+
text_container: cond_json,
|
121 |
+
sequence_container: ori_sequence
|
122 |
+
}
|
123 |
+
else:
|
124 |
+
return {
|
125 |
+
result_gallery: [ret_img],
|
126 |
+
raw_sequence: gr.update(value=ori_sequence, visible=True),
|
127 |
+
images_button: gr.update(visible=False),
|
128 |
+
text_container: cond_json,
|
129 |
+
sequence_container: ori_sequence
|
130 |
+
}
|
131 |
+
|
132 |
+
def add_contents(gen_type,
|
133 |
+
data_type,
|
134 |
+
instance_size,
|
135 |
+
num_instance,
|
136 |
+
object_name_inbox,
|
137 |
+
num_continuous_gen,
|
138 |
+
global_seq):
|
139 |
+
|
140 |
+
ctn = True
|
141 |
+
|
142 |
+
if gen_type == 'key point':
|
143 |
+
num_keypoint = 18
|
144 |
+
seq_prompt = '; '.join([gen_type, data_type, instance_size, str(num_instance), str(num_keypoint)]) + ' ; [person'
|
145 |
+
|
146 |
+
if num_continuous_gen:
|
147 |
+
ctn = True
|
148 |
+
cur_instance = int(global_seq.split(';')[3].strip())
|
149 |
+
new_number = cur_instance + num_continuous_gen
|
150 |
+
if new_number > max_num_keypoint:
|
151 |
+
new_number = max_num_keypoint
|
152 |
+
|
153 |
+
# prompt type a
|
154 |
+
if global_seq.split(';')[5].find('[') == -1:
|
155 |
+
global_seq = global_seq.replace('[CLS]', '').replace('[SEP]', '')
|
156 |
+
objects = re.findall(re.compile(r'[\[](.*?)[]]', re.S), global_seq)
|
157 |
+
objects = ' '.join(['[ person' + x + ']' for x in objects])
|
158 |
+
seq_prompt = '; '.join([gen_type, data_type, instance_size, str(new_number), str(num_keypoint), objects])
|
159 |
+
# prompt type b
|
160 |
+
else:
|
161 |
+
global_seq = global_seq.replace('[CLS]', '').replace('[SEP]', '')
|
162 |
+
seq_list = global_seq.split(';')
|
163 |
+
seq_list[3] = str(new_number)
|
164 |
+
seq_prompt = ';'.join(seq_list)
|
165 |
+
|
166 |
+
elif gen_type == 'box' or gen_type == 'mask':
|
167 |
+
num_keypoint = 0
|
168 |
+
if data_type == 'object centric':
|
169 |
+
num_instance = 1
|
170 |
+
objects = ', '.join(object_name_inbox.strip().split(";"))
|
171 |
+
seq_prompt = '; '.join([gen_type, data_type, instance_size,
|
172 |
+
str(num_instance), str(num_keypoint)]) + '; ' + objects
|
173 |
+
if len(object_name_inbox.split(';')) > num_instance:
|
174 |
+
return {
|
175 |
+
raw_sequence: gr.update(value=f"The umber of category names should be less than the number of instances, please try again :)", visible=True)
|
176 |
+
}
|
177 |
+
|
178 |
+
if num_continuous_gen:
|
179 |
+
cur_instance = int(global_seq.split(';')[3].strip())
|
180 |
+
new_number = cur_instance + num_continuous_gen
|
181 |
+
|
182 |
+
if gen_type == 'mask':
|
183 |
+
if new_number > max_num_mask:
|
184 |
+
new_number = max_num_mask
|
185 |
+
if gen_type == 'box':
|
186 |
+
if new_number > max_num_bbox:
|
187 |
+
new_number = max_num_bbox
|
188 |
+
|
189 |
+
# prompt type a
|
190 |
+
if global_seq.split(';')[5].find('[') == -1:
|
191 |
+
global_seq = global_seq.replace('[CLS]', '').replace('[SEP]', '')
|
192 |
+
coords = re.findall(re.compile(r'[\[](.*?)[]]', re.S), global_seq)
|
193 |
+
|
194 |
+
objects = global_seq.split(';')[5].split(',')
|
195 |
+
objects = ' '.join(['[ ' + objects[i] + coords[i] + ']' for i in range(len(coords))])
|
196 |
+
|
197 |
+
seq_prompt = '; '.join([gen_type, data_type, instance_size, str(new_number), str(num_keypoint), objects])
|
198 |
+
# prompt type b
|
199 |
+
else:
|
200 |
+
global_seq = global_seq.replace('[CLS]', '').replace('[SEP]', '')
|
201 |
+
seq_list = global_seq.split(';')
|
202 |
+
seq_list[3] = str(new_number)
|
203 |
+
seq_prompt = ';'.join(seq_list)
|
204 |
+
|
205 |
+
# import ipdb;ipdb.set_trace()
|
206 |
+
print("input prompt: \n", seq_prompt)
|
207 |
+
with torch.no_grad():
|
208 |
+
sequence = gen_sequence(visorgpt_args, visorgpt_model, seq_prompt)
|
209 |
+
torch.cuda.empty_cache()
|
210 |
+
|
211 |
+
assert isinstance(sequence, list)
|
212 |
+
|
213 |
+
try:
|
214 |
+
cond_mask, cond_json = gen_cond_mask(sequence, ctn)
|
215 |
+
if gen_type == 'key point':
|
216 |
+
ori_sequence = cond_json[2]['sequences'][0][0] + '[SEP]'
|
217 |
+
elif gen_type == 'box':
|
218 |
+
ori_sequence = cond_json[0]['sequences'][0][0] + '[SEP]'
|
219 |
+
elif gen_type == 'mask':
|
220 |
+
ori_sequence = cond_json[1]['sequences'][0][0] + '[SEP]'
|
221 |
+
except:
|
222 |
+
cond_mask, cond_json = gen_cond_mask(sequence, not ctn)
|
223 |
+
if gen_type == 'key point':
|
224 |
+
ori_sequence = cond_json[2]['sequences'][0][0] + '[SEP]'
|
225 |
+
elif gen_type == 'box':
|
226 |
+
ori_sequence = cond_json[0]['sequences'][0][0] + '[SEP]'
|
227 |
+
elif gen_type == 'mask':
|
228 |
+
ori_sequence = cond_json[1]['sequences'][0][0] + '[SEP]'
|
229 |
+
|
230 |
+
ret_img = Image.fromarray(cond_mask)
|
231 |
+
|
232 |
+
|
233 |
+
if not gen_type == 'mask':
|
234 |
+
return {
|
235 |
+
result_gallery: [ret_img],
|
236 |
+
raw_sequence: gr.update(value=ori_sequence, visible=True),
|
237 |
+
images_button: gr.update(visible=True),
|
238 |
+
text_container: cond_json,
|
239 |
+
sequence_container: ori_sequence
|
240 |
+
}
|
241 |
+
else:
|
242 |
+
return {
|
243 |
+
result_gallery: [ret_img],
|
244 |
+
raw_sequence: gr.update(value=ori_sequence, visible=True),
|
245 |
+
images_button: gr.update(visible=False),
|
246 |
+
text_container: cond_json,
|
247 |
+
sequence_container: ori_sequence
|
248 |
+
}
|
249 |
+
|
250 |
+
def generate_images(gen_type,
|
251 |
+
num_samples,
|
252 |
+
ddim_steps,
|
253 |
+
object_prompt,
|
254 |
+
seed,
|
255 |
+
global_text,
|
256 |
+
global_seq):
|
257 |
+
|
258 |
+
if gen_type == 'key point':
|
259 |
+
|
260 |
+
data = global_text[2]['keypoints']
|
261 |
+
idx = np.arange(len(data))
|
262 |
+
split_idx = list(np.array_split(idx, 1)[0])
|
263 |
+
for idx in tqdm(split_idx):
|
264 |
+
item = data[idx]
|
265 |
+
keypoint_list = []
|
266 |
+
for ins in item:
|
267 |
+
kv = list(ins.items())[0]
|
268 |
+
keypoint = (np.array(kv[1])).tolist()
|
269 |
+
keypoint_list.append(keypoint)
|
270 |
+
|
271 |
+
with torch.no_grad():
|
272 |
+
ret_img = control_infer(model=control_model,
|
273 |
+
ddim_sampler=ddim_sampler,
|
274 |
+
keypoint_list=keypoint_list,
|
275 |
+
prompt=object_prompt.strip(),
|
276 |
+
num_samples=num_samples,
|
277 |
+
ddim_steps=ddim_steps,
|
278 |
+
seed=seed)
|
279 |
+
torch.cuda.empty_cache()
|
280 |
+
|
281 |
+
elif gen_type == 'box':
|
282 |
+
|
283 |
+
data = global_text[0]['bboxes']
|
284 |
+
|
285 |
+
with torch.no_grad():
|
286 |
+
ret_img = gligen_infer(model=g_model,
|
287 |
+
autoencoder=g_autoencoder,
|
288 |
+
text_encoder=g_text_encoder,
|
289 |
+
diffusion=g_diffusion,
|
290 |
+
config=g_config,
|
291 |
+
grounding_tokenizer_input=g_grounding_tokenizer_input,
|
292 |
+
context_prompt=object_prompt.strip(),
|
293 |
+
bbox_lists=data,
|
294 |
+
ddim_steps=ddim_steps,
|
295 |
+
batch_size=num_samples,
|
296 |
+
seed=seed)
|
297 |
+
torch.cuda.empty_cache()
|
298 |
+
|
299 |
+
if not gen_type == 'mask':
|
300 |
+
return {
|
301 |
+
result_gallery: ret_img,
|
302 |
+
text_container: global_text,
|
303 |
+
sequence_container: global_seq
|
304 |
+
}
|
305 |
+
else:
|
306 |
+
return {
|
307 |
+
raw_sequence: "sequence to mask is not supported yet :)",
|
308 |
+
text_container: global_text,
|
309 |
+
sequence_container: global_seq
|
310 |
+
}
|
311 |
+
|
312 |
+
|
313 |
+
def object_name_inbox_fn(gen_type):
|
314 |
+
|
315 |
+
if gen_type == 'key point':
|
316 |
+
return {
|
317 |
+
object_name_inbox: gr.update(visible=False),
|
318 |
+
data_type: gr.update(choices=['multiple instances']),
|
319 |
+
images_button: gr.update(value='Synthesize images using ControlNet'),
|
320 |
+
ddim_steps: gr.update(value=20),
|
321 |
+
object_prompt: gr.update(placeholder='in suit'),
|
322 |
+
num_instance: gr.update(visible=True, minimum=1, maximum=16, value=2, step=1),
|
323 |
+
sequence_container: None
|
324 |
+
}
|
325 |
+
elif gen_type == 'box':
|
326 |
+
return {
|
327 |
+
object_name_inbox: gr.update(visible=True, value='person; frisbee'),
|
328 |
+
data_type: gr.update(choices=['multiple instances', 'object centric']),
|
329 |
+
images_button: gr.update(value='Synthesize images using GLIGEN'),
|
330 |
+
ddim_steps: gr.update(value=50),
|
331 |
+
object_prompt: gr.update(placeholder='man and frisbee'),
|
332 |
+
num_instance: gr.update(visible=True, minimum=1, maximum=16, value=2, step=1),
|
333 |
+
sequence_container: None
|
334 |
+
}
|
335 |
+
|
336 |
+
elif gen_type == 'mask':
|
337 |
+
return {
|
338 |
+
object_name_inbox: gr.update(visible=True,
|
339 |
+
label="MS COCO categories to be generated (separated by semicolon)", value='bottle; cup'),
|
340 |
+
data_type: gr.update(choices=['multiple instances']),
|
341 |
+
images_button: gr.update(value='Synthesize images using GLIGEN'),
|
342 |
+
ddim_steps: gr.update(value=50),
|
343 |
+
object_prompt: gr.update(placeholder='bottle and cup'),
|
344 |
+
num_instance: gr.update(visible=True, minimum=1, maximum=8, value=2, step=1),
|
345 |
+
sequence_container: None
|
346 |
+
}
|
347 |
+
|
348 |
+
|
349 |
+
def instance_type_change_fn(data_type):
|
350 |
+
|
351 |
+
if data_type == 'multiple instances':
|
352 |
+
return {
|
353 |
+
md_title: gr.update(visible=True),
|
354 |
+
num_continuous_gen: gr.update(visible=True),
|
355 |
+
continuous_btn: gr.update(visible=True),
|
356 |
+
object_name_inbox: gr.update(label="MS COCO categories to be generated (separated by semicolon)", value='person; frisbee'),
|
357 |
+
object_prompt: gr.update(placeholder='man and frisbee'),
|
358 |
+
num_instance: gr.update(visible=True, minimum=1, maximum=16, value=2, step=1),
|
359 |
+
}
|
360 |
+
|
361 |
+
elif data_type == 'object centric':
|
362 |
+
return {
|
363 |
+
md_title: gr.update(visible=False),
|
364 |
+
num_continuous_gen: gr.update(visible=False),
|
365 |
+
continuous_btn: gr.update(visible=False),
|
366 |
+
object_name_inbox: gr.update(label="ImageNet-1K categories to be generated", value='great white shark'),
|
367 |
+
object_prompt: gr.update(placeholder='great white shark'),
|
368 |
+
num_instance: gr.update(visible=False, value=1),
|
369 |
+
}
|
370 |
+
|
371 |
+
block = gr.Blocks()
|
372 |
+
with block:
|
373 |
+
|
374 |
+
text_container = gr.State()
|
375 |
+
sequence_container = gr.State()
|
376 |
+
|
377 |
+
gr.Markdown('<div align=center> <img src="file/visorgpt_title_all.jpg" width = "100%" height = "100%" /> </div>')
|
378 |
+
|
379 |
+
with gr.Row():
|
380 |
+
with gr.Column():
|
381 |
+
|
382 |
+
gr.Markdown("### Params to generate sequences")
|
383 |
+
gen_type = gr.inputs.Dropdown(choices=['key point', 'box', 'mask'], type='value', default='key point', label='Anotation Type')
|
384 |
+
data_type = gr.inputs.Dropdown(choices=['multiple instances'], type='value', default='multiple instances', label='Data Type')
|
385 |
+
instance_size = gr.inputs.Dropdown(choices=['small', 'medium', 'large'], type='value', default='large', label='Instance Size')
|
386 |
+
num_instance = gr.Slider(label="Number of instances per image", minimum=1, maximum=16, value=2, step=1)
|
387 |
+
object_name_inbox = gr.Textbox(label="MS COCO categories to be generated (separated by semicolon)", placeholder="person; frisbee", visible=False)
|
388 |
+
sequence_button = gr.Button(value="Customize sequential output")
|
389 |
+
|
390 |
+
|
391 |
+
md_title = gr.Markdown("### Continuous generation (Optional)")
|
392 |
+
num_continuous_gen = gr.Slider(label="Add instances to the current scene", minimum=1, maximum=16, value=1, step=1)
|
393 |
+
|
394 |
+
continuous_btn = gr.Button(value="Add instances to the current scene")
|
395 |
+
|
396 |
+
gr.Markdown("### Params to synthesize images")
|
397 |
+
object_prompt = gr.Textbox(label="Context Prompt", placeholder="in suit", visible=True)
|
398 |
+
num_samples = gr.Slider(label="Batch Size", minimum=1, maximum=36, value=1, step=1)
|
399 |
+
ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
|
400 |
+
seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
|
401 |
+
images_button = gr.Button(value="Synthesize images using ControlNet", visible=False)
|
402 |
+
|
403 |
+
|
404 |
+
with gr.Column():
|
405 |
+
raw_sequence = gr.Textbox(label="Raw Sequence", visible=False)
|
406 |
+
result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto', preview=True)
|
407 |
+
|
408 |
+
gen_type.change(object_name_inbox_fn, inputs=[gen_type],
|
409 |
+
outputs=[object_name_inbox, data_type, images_button, ddim_steps, object_prompt, num_instance, sequence_container])
|
410 |
+
|
411 |
+
data_type.change(instance_type_change_fn, inputs=[data_type],
|
412 |
+
outputs=[md_title, num_continuous_gen, continuous_btn, object_name_inbox, object_prompt, num_instance])
|
413 |
+
|
414 |
+
|
415 |
+
ips = [gen_type, data_type, instance_size, num_instance, object_name_inbox]
|
416 |
+
sequence_button.click(fn=generate_sequence, inputs=ips, outputs=[result_gallery, raw_sequence, images_button, text_container, sequence_container])
|
417 |
+
|
418 |
+
ips = [gen_type, data_type, instance_size, num_instance, object_name_inbox, num_continuous_gen, sequence_container]
|
419 |
+
continuous_btn.click(fn=add_contents, inputs=ips, outputs=[result_gallery, raw_sequence, images_button, text_container, sequence_container])
|
420 |
+
|
421 |
+
ips = [gen_type, num_samples, ddim_steps, object_prompt, seed, text_container, sequence_container]
|
422 |
+
images_button.click(fn=generate_images, inputs=ips, outputs=[result_gallery, raw_sequence, text_container, sequence_container])
|
423 |
+
|
424 |
+
block.launch(server_name='0.0.0.0', server_port=10086, debug=False, share=False)
|
425 |
+
|
demo/ckpts/controlnet/cldm_v15.yaml
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model:
|
2 |
+
target: visor_controlnet.cldm.cldm.ControlLDM
|
3 |
+
params:
|
4 |
+
linear_start: 0.00085
|
5 |
+
linear_end: 0.0120
|
6 |
+
num_timesteps_cond: 1
|
7 |
+
log_every_t: 200
|
8 |
+
timesteps: 1000
|
9 |
+
first_stage_key: "jpg"
|
10 |
+
cond_stage_key: "txt"
|
11 |
+
control_key: "hint"
|
12 |
+
image_size: 64
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
use_ema: False
|
19 |
+
only_mid_control: False
|
20 |
+
|
21 |
+
control_stage_config:
|
22 |
+
target: visor_controlnet.cldm.cldm.ControlNet
|
23 |
+
params:
|
24 |
+
image_size: 32 # unused
|
25 |
+
in_channels: 4
|
26 |
+
hint_channels: 3
|
27 |
+
model_channels: 320
|
28 |
+
attention_resolutions: [ 4, 2, 1 ]
|
29 |
+
num_res_blocks: 2
|
30 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
31 |
+
num_heads: 8
|
32 |
+
use_spatial_transformer: True
|
33 |
+
transformer_depth: 1
|
34 |
+
context_dim: 768
|
35 |
+
use_checkpoint: True
|
36 |
+
legacy: False
|
37 |
+
|
38 |
+
unet_config:
|
39 |
+
target: visor_controlnet.cldm.cldm.ControlledUnetModel
|
40 |
+
params:
|
41 |
+
image_size: 32 # unused
|
42 |
+
in_channels: 4
|
43 |
+
out_channels: 4
|
44 |
+
model_channels: 320
|
45 |
+
attention_resolutions: [ 4, 2, 1 ]
|
46 |
+
num_res_blocks: 2
|
47 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
48 |
+
num_heads: 8
|
49 |
+
use_spatial_transformer: True
|
50 |
+
transformer_depth: 1
|
51 |
+
context_dim: 768
|
52 |
+
use_checkpoint: True
|
53 |
+
legacy: False
|
54 |
+
|
55 |
+
first_stage_config:
|
56 |
+
target: visor_controlnet.ldm.models.autoencoder.AutoencoderKL
|
57 |
+
params:
|
58 |
+
embed_dim: 4
|
59 |
+
monitor: val/rec_loss
|
60 |
+
ddconfig:
|
61 |
+
double_z: true
|
62 |
+
z_channels: 4
|
63 |
+
resolution: 256
|
64 |
+
in_channels: 3
|
65 |
+
out_ch: 3
|
66 |
+
ch: 128
|
67 |
+
ch_mult:
|
68 |
+
- 1
|
69 |
+
- 2
|
70 |
+
- 4
|
71 |
+
- 4
|
72 |
+
num_res_blocks: 2
|
73 |
+
attn_resolutions: []
|
74 |
+
dropout: 0.0
|
75 |
+
lossconfig:
|
76 |
+
target: torch.nn.Identity
|
77 |
+
|
78 |
+
cond_stage_config:
|
79 |
+
target: visor_controlnet.ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
demo/ckpts/controlnet/control_v11p_sd15_openpose.pth
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:db97becd92cd19aff71352a60e93c2508decba3dee64f01f686727b9b406a9dd
|
3 |
+
size 1445235707
|
demo/ckpts/controlnet/idle
ADDED
File without changes
|
demo/ckpts/controlnet/v1-5-pruned-emaonly.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa
|
3 |
+
size 4265146304
|
demo/ckpts/gligen/diffusion_pytorch_model_box.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f5f3d2d5ec6e01c7ad7ca811a39904db675d1c5fccfeca9d34d63e4bf65ccd7b
|
3 |
+
size 6775067861
|
demo/ckpts/gligen/idle
ADDED
File without changes
|
demo/ckpts/visorgpt/idle
ADDED
File without changes
|
demo/ckpts/visorgpt/visorgpt_dagger_ta_tb.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6ad8c442caf1ec58accc97dbc5b0636e4398a9853cf6e6475f5be01f087da245
|
3 |
+
size 219003175
|
requirements.txt
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
absl-py==1.4.0
|
2 |
+
addict==2.4.0
|
3 |
+
aiofiles==23.1.0
|
4 |
+
aiohttp==3.8.4
|
5 |
+
aiosignal==1.3.1
|
6 |
+
altair==4.2.2
|
7 |
+
antlr4-python3-runtime==4.9.3
|
8 |
+
anyio==3.6.2
|
9 |
+
asttokens==2.2.1
|
10 |
+
async-timeout==4.0.2
|
11 |
+
attrs==22.2.0
|
12 |
+
backcall==0.2.0
|
13 |
+
basicsr==1.4.2
|
14 |
+
blinker==1.5
|
15 |
+
brotlipy==0.7.0
|
16 |
+
cachetools==5.3.0
|
17 |
+
certifi @ file:///croot/certifi_1671487769961/work/certifi
|
18 |
+
cffi @ file:///tmp/abs_98z5h56wf8/croots/recipe/cffi_1659598650955/work
|
19 |
+
charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
|
20 |
+
click==8.1.3
|
21 |
+
clip==0.2.0
|
22 |
+
contourpy==1.0.7
|
23 |
+
# Editable install with no version control (controlnet==1.0)
|
24 |
+
-e /home/cvi_demo/PPSM/demo/ControlNet
|
25 |
+
coqpit==0.0.17
|
26 |
+
cryptography @ file:///croot/cryptography_1673298753778/work
|
27 |
+
cycler==0.11.0
|
28 |
+
decorator==5.1.1
|
29 |
+
einops==0.6.0
|
30 |
+
elasticsearch==1.9.0
|
31 |
+
entrypoints==0.4
|
32 |
+
executing==1.2.0
|
33 |
+
fastapi==0.95.0
|
34 |
+
ffmpy==0.3.0
|
35 |
+
filelock==3.11.0
|
36 |
+
flit-core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
|
37 |
+
fonttools==4.39.3
|
38 |
+
frozenlist==1.3.3
|
39 |
+
fsspec==2023.4.0
|
40 |
+
ftfy==6.1.1
|
41 |
+
future==0.18.3
|
42 |
+
# Editable install with no version control (gligen==1.0)
|
43 |
+
-e /home/cvi_demo/PPSM/demo/GLIGEN
|
44 |
+
google-auth==2.17.2
|
45 |
+
google-auth-oauthlib==1.0.0
|
46 |
+
gradio==3.25.0
|
47 |
+
gradio-client==0.0.10
|
48 |
+
grpcio==1.51.3
|
49 |
+
h11==0.14.0
|
50 |
+
httpcore==0.17.0
|
51 |
+
httpx==0.24.0
|
52 |
+
huggingface-hub==0.13.4
|
53 |
+
idna @ file:///croot/idna_1666125576474/work
|
54 |
+
imageio==2.27.0
|
55 |
+
importlib-metadata==6.1.0
|
56 |
+
importlib-resources==5.12.0
|
57 |
+
ipdb==0.13.13
|
58 |
+
ipython==8.11.0
|
59 |
+
iso8601==1.1.0
|
60 |
+
jedi==0.18.2
|
61 |
+
Jinja2==3.1.2
|
62 |
+
jsonschema==4.17.3
|
63 |
+
kiwisolver==1.4.4
|
64 |
+
kornia==0.6.10
|
65 |
+
lazy-loader==0.2
|
66 |
+
lightning-utilities==0.8.0
|
67 |
+
linkify-it-py==2.0.0
|
68 |
+
lmdb==1.4.0
|
69 |
+
Markdown==3.4.3
|
70 |
+
markdown-it-py==2.2.0
|
71 |
+
MarkupSafe==2.1.2
|
72 |
+
matplotlib==3.7.1
|
73 |
+
matplotlib-inline==0.1.6
|
74 |
+
mdit-py-plugins==0.3.3
|
75 |
+
mdurl==0.1.2
|
76 |
+
mkl-fft==1.3.1
|
77 |
+
mkl-random @ file:///tmp/build/80754af9/mkl_random_1626186064646/work
|
78 |
+
mkl-service==2.4.0
|
79 |
+
multidict==6.0.4
|
80 |
+
networkx==3.0
|
81 |
+
numpy @ file:///tmp/abs_653_j00fmm/croots/recipe/numpy_and_numpy_base_1659432701727/work
|
82 |
+
oauthlib==3.2.2
|
83 |
+
omegaconf==2.3.0
|
84 |
+
open-clip-torch==2.16.0
|
85 |
+
opencv-python==4.7.0.72
|
86 |
+
orjson==3.8.10
|
87 |
+
packaging==23.0
|
88 |
+
pandas==2.0.0
|
89 |
+
parso==0.8.3
|
90 |
+
pexpect==4.8.0
|
91 |
+
pickleshare==0.7.5
|
92 |
+
Pillow==9.3.0
|
93 |
+
pkgutil-resolve-name==1.3.10
|
94 |
+
prompt-toolkit==3.0.38
|
95 |
+
protobuf==3.19.6
|
96 |
+
psutil==5.9.4
|
97 |
+
ptyprocess==0.7.0
|
98 |
+
pure-eval==0.2.2
|
99 |
+
pyasn1==0.4.8
|
100 |
+
pyasn1-modules==0.2.8
|
101 |
+
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
|
102 |
+
pydantic==1.10.7
|
103 |
+
pyDeprecate==0.3.2
|
104 |
+
pydub==0.25.1
|
105 |
+
Pygments==2.14.0
|
106 |
+
PyJWT==2.6.0
|
107 |
+
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
|
108 |
+
pyparsing==3.0.9
|
109 |
+
pyrsistent==0.19.3
|
110 |
+
PySocks @ file:///tmp/build/80754af9/pysocks_1605305779399/work
|
111 |
+
python-dateutil==2.8.2
|
112 |
+
python-multipart==0.0.6
|
113 |
+
pytorch-lightning==1.6.5
|
114 |
+
pytz==2023.3
|
115 |
+
PyWavelets==1.4.1
|
116 |
+
PyYAML==6.0
|
117 |
+
regex==2023.3.23
|
118 |
+
requests @ file:///opt/conda/conda-bld/requests_1657734628632/work
|
119 |
+
requests-oauthlib==1.3.1
|
120 |
+
rsa==4.9
|
121 |
+
scikit-image==0.20.0
|
122 |
+
scipy==1.9.1
|
123 |
+
semantic-version==2.10.0
|
124 |
+
sentencepiece==0.1.97
|
125 |
+
share==1.0.4
|
126 |
+
six @ file:///tmp/build/80754af9/six_1644875935023/work
|
127 |
+
sniffio==1.3.0
|
128 |
+
soundfile==0.12.1
|
129 |
+
stack-data==0.6.2
|
130 |
+
starlette==0.26.1
|
131 |
+
tb-nightly==2.13.0a20230410
|
132 |
+
tensorboard==2.12.0
|
133 |
+
tensorboard-data-server==0.7.0
|
134 |
+
tensorboard-plugin-wit==1.8.1
|
135 |
+
tensorboardX==2.6
|
136 |
+
tifffile==2023.3.21
|
137 |
+
timm==0.6.13
|
138 |
+
tokenizers==0.13.2
|
139 |
+
tomli==2.0.1
|
140 |
+
toolz==0.12.0
|
141 |
+
torch==1.12.1
|
142 |
+
torchmetrics==0.11.4
|
143 |
+
torchvision==0.13.1
|
144 |
+
tqdm==4.65.0
|
145 |
+
traitlets==5.9.0
|
146 |
+
transformers==4.27.4
|
147 |
+
typing-extensions @ file:///croot/typing_extensions_1669924550328/work
|
148 |
+
tzdata==2023.3
|
149 |
+
uc-micro-py==1.0.1
|
150 |
+
urllib3 @ file:///croot/urllib3_1673575502006/work
|
151 |
+
uvicorn==0.21.1
|
152 |
+
wcwidth==0.2.6
|
153 |
+
websockets==11.0.1
|
154 |
+
Werkzeug==2.2.3
|
155 |
+
yapf==0.32.0
|
156 |
+
yarl==1.8.2
|
157 |
+
zipp==3.15.0
|
158 |
+
visor_controlnet
|
159 |
+
visor-gligen==1.1
|
160 |
+
visor-controlnet==1.1
|
train/README.md
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
The code is highly based on [TencentPretrain](https://github.com/Tencent/TencentPretrain).
|
train/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
from .scripts import *
|
train/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (152 Bytes). View file
|
|
train/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (152 Bytes). View file
|
|
train/beginning.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
key point; Multiple instances; 5; 14; medium;
|
train/corpora/CLUECorpusSmall_bert_sampled.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/corpora/CLUECorpusSmall_sampled.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/corpora/book_review.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/corpora/book_review_bert.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/corpora/book_review_cls.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/datasets/book_review/dev.tsv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/datasets/book_review/test.tsv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/datasets/book_review/test_nolabel.tsv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/datasets/book_review/train.tsv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train/datasets/test_data/book_review/dev.tsv
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
label text_a
|
2 |
+
1 洋洋洒洒五百多页,基本全部是伍迪·艾伦在说,内容几乎没有纰漏,不过重复的地方也不少。
|
3 |
+
1 写类型片编剧的宝贝,虽然偏向商业化,但是也能学到很多对剧本节奏的建议
|
4 |
+
1 真会讲故事啊
|
5 |
+
1 一知半解的德棍只喜欢德三,真正愿意探寻历史的德粉,也许更偏好普鲁士
|
6 |
+
0 弃
|
7 |
+
0 很生气,一晚上看完,只有生气。太矫情了
|
8 |
+
1 这个标题真是太贴切了,真的是罪,真的是美。
|
9 |
+
1 想起导师改我文章的时候就像"写作风格"那章写的那样改的,看着眼熟。还是一样啊,技术可以通过反复模仿习得,但是角度却不能随着数量的叠加而变得更加独到,就像年少时的学霸和学神,以及好的导演让人心满意足的标上四星,而有灵性的让人一个冲动就满星了。
|
10 |
+
1 拍电影极生气时候导拍断过手,踢断过腿,真是个暴烈的人呀。
|
11 |
+
0 两星到三星之间的作品
|
12 |
+
1 在瞎子的世界,谁有一只眼睛谁就是国王
|
13 |
+
1 你这死尸的囚徒!(还是囚徒的死尸来着?)
|
14 |
+
1 清平定准葛尔标志着游牧帝国的结束
|
15 |
+
0 没有才调,看在材料份上加一星。
|
16 |
+
0 我对这个作者没爱,了了
|
17 |
+
1 比 《认识电影》深奥。。。。
|
18 |
+
1 历史书做成这样真是太赞了!
|
19 |
+
1 电影更值得一看
|
20 |
+
0 我只想说 我为了跟上潮流==
|
21 |
+
1 英文名称与中文译名都不错,引人入胜的小说。电视剧也很棒。
|
22 |
+
1 我的帝王生涯
|
23 |
+
0 这个推理特别牵强 从一开始就能猜到最后的大boss必然是他身边的几个同事之一 看起来最傻的那个最可疑 游戏线写的还不错 可生活线太糟了 埋伏笔埋得太刻意 称不上是一本好的悬疑
|
24 |
+
1 伍迪艾伦把工作和私生活分的很开,即使是被前妻起诉,外界对他议论纷纷的时候,他还是保持工作的节奏,把官司交给律师,不像国内的那些跳梁小丑,消费自己的生活,只为一点关注度。
|
25 |
+
0 希望看到的是活生生的人,可惜只看到假大空的神
|
26 |
+
0 废话太多
|
27 |
+
0 啥玩意,情色系的啊。故事一般。看的困。
|
28 |
+
1 文人骨子里的东西~其实我读起来有点像文人版的麦田里的守望者~
|
29 |
+
1 普通法等。翻译假装要扣一星。电子版反倒没有删节。
|
30 |
+
0 你是猴子请来的逗逼吗。
|
31 |
+
0 人生读过最狗血的书之一 除了对了解穆斯林信仰风俗有所帮助之外都是狗血
|
32 |
+
1 读胡金铨在韩国拍摄空山灵雨一段,越发觉得空有一番热忱是什么都做不成的,要与环境做各种妥协、交换,也要有自身的主心骨,切勿随波逐流,乱了方寸。
|
33 |
+
1 什么叫震撼人心?!
|
34 |
+
1 照片珍贵。
|
35 |
+
1 偶的启蒙书之一~
|
36 |
+
1 看片量差远了,回头再回锅
|
37 |
+
0 两章果断弃!
|
38 |
+
1 从农村到城市,中国的城乡进程讲述的很详细,历史脉络清晰。
|
39 |
+
0 跟看我高中同学的日记本差不多。
|
40 |
+
0 这种硬科幻,真是受不了。
|
41 |
+
1 作为资深影迷,这本书必读
|
42 |
+
1 现实与荒诞,耶稣与撒旦,奇诡的想象恣意横生、酣畅淋漓。趁热打铁,可以把剧看起来了,谢谢木木老师的码~~
|
43 |
+
0 小白,后面没怎么虐楠竹。
|
44 |
+
0 一句话我就不想看了:诸葛亮在刘表身边,他却不能发现,这个人好蠢。。。什么人才是二十一世纪最重要的。历史这种解读,真是醉了。还有那么高的分,可见一斑。
|
45 |
+
1 这书有毒!
|
46 |
+
0 为了曲折和煽情而造作,悲剧得不可思议
|
47 |
+
1 终于在这个被工作和学习双重折磨的阶段,读完了岳南先生的这部大作。。虽然只有120万字,但是作者耗费的心力应该有一亿两千万字。当读到穆旦在深夜冒着严寒走几个小时,只为了去给因文革而几个月没见的妻子送去几块糖,这样的感动让人无法言喻。这本书五星推荐!
|
48 |
+
1 受益匪浅
|
49 |
+
0 帮人查点东西又翻出来了。错讹哪些属于有意哪些归于无心也不用多想。不过最近居然听闻有人认为讲古代史的文献须核对,讲现当代历史的大段引文则“意思对就行”不必强求,连出处都可以不标注。若不是故意甩出双重标准,那就是真心无知且不怕惹人耻笑。所谓“意思对”简直是自黑。出版规范先不讨论,近现代文化史艺术史扯不清楚,原因真是丰富多彩。
|
50 |
+
1 虽为一家之言,不过很有启发
|
51 |
+
1 (⊙o⊙)。。卧。。槽。。
|
52 |
+
1 果不其然地掉泪,人之初,性自由。忽然觉得很对。
|
53 |
+
0 精与屎
|
54 |
+
1 电影就是这么神奇,简直不可思议,一寸移动,一个角度改变,一个连接,哪怕只多停留1秒,观众就能感知到巨大的情感震撼。别问为什么,要问问心理学家。此书可以算拉片总结,部分镜头在大量阅片和经验中已经成为了意识惯性。
|
55 |
+
0 希望有一天自己可以孤独的去旅行吧
|
56 |
+
1 只看了篇简单的书评介绍就被迷住了,这本书似乎能解决我的许多疑惑,就像当年遇到弗雷泽《金枝》的那种豁然开朗,所以,���一定要看看。读到“二十,放马天山雪中草”为止,等资源。
|
57 |
+
1 人类绘制地图的历史也像人类理解自然世界的历史一样,充满着曲折,我们的视野被技术、能力、宗教、政治、经济等外部因素不断影响,而没一副地图也代表了一个时代和一种观点,说明了我们是如何一步步走来的。震撼人心的一本书,实在太棒!
|
58 |
+
0 网上找的,看到三分之一还在罗里吧嗦的介绍人物性格,看不下去了弃文
|
59 |
+
0 罗里吧嗦
|
60 |
+
1 喜欢这种感觉的书~
|
61 |
+
1 好的通史绝不是泛泛而论,而是有自己可称作主心骨的意见。钱穆此书作于全民抗战时,国族危急存亡之秋自然有许多人检讨乃至彻底否定过去数千年历史。钱穆则认为以一时一地的得失一边倒地臧否过去乃是失智。譬如他认为中国古代(除清朝外)之无民主代议制是必然,因为该制度根本就不必要(参页三九九)。当然,钱穆也并不避讳历代政治之失误。有许多人认为钱穆亲中贬西,我觉得要从其讨论的背景才能理解他的立场。
|
62 |
+
0 桐华你是有多恨姓孟的
|
63 |
+
0 我要这天,再遮不住我眼;要这地,再埋不了我心。要这众生,都明白我意,要那诸佛,都烟消云散! 我可以忘了我自己,可是,我忘不了东海水,忘不了花果山,忘不了西天路,忘不了路上的人。
|
64 |
+
0 写得太差了!絮絮叨叨完全不像她的文字风格,只有为虐而虐的传统保持下来了,完全看不下去。
|
65 |
+
0 这次东野先生尝试了关于脑死亡与器官移植的话题,在一些小的片段上也延续了他作为一个推理小说家善于营造气氛的特点,然而这本书的故事性实在是太弱,书中的人物关系也过于浅尝辄止,以至于有很大的篇幅让人感觉是在注水,最后虽然给了一个皆大欢喜的结局,却又显得很刻意。
|
66 |
+
0 “二十岁就知道的事,却花了一辈子来证实。爱情,正如死亡,不由分说。”萨冈的短篇太弱了
|
67 |
+
1 对胡金铨有一个详细的访谈,也有一些八卦。
|
68 |
+
1 政治追求与爱恨情仇…
|
69 |
+
1 希望多年后的我能认真地重读这本书
|
70 |
+
1 意识流日记。搞艺术的人我真的从来不懂,但文笔和封面一样美……
|
71 |
+
1 有收获。有励到我的志
|
72 |
+
0 超不喜欢结局
|
73 |
+
1 在我读过的少得可怜的诺奖作品中,这是最喜欢的一本。细节之处很见功力,描写优美又不显拖沓。并非教徒的人显然可以把“你往何处去”的主语换成自己心中的某种力量或准则,一样有震撼人心的美。“毒杀你的人民吧,可别再跳舞了”。这句简直是今年阅读的年度之句。显克微支大大这里有一只你的脑残粉!!!
|
74 |
+
1 很踏实和基础,用书的形式体现影像变化有点吃力,但作者已经描述得很不错。对于构建剪辑学的整体脉络有帮助。
|
75 |
+
1 咦居然看过。但是只记得不要和胖友吃饭因为胖会传染了😂
|
76 |
+
0 感觉作者开创了一个新的文学流派:中国历史轻小说……为了捧偶像传主的臭脚,竟不惜篡改史料原文、以论带史甚至主观臆造,读之瞠目结舌。后记更是彰显了日本史学界的无知,竟有说雍正是明朝年号,我还说德川家康是日本天皇哩……
|
77 |
+
0 傻书。注解比原文还多,看着累,到后面就跳过不看。光看文,一个字。
|
78 |
+
0 不知道是翻译的问题还是原来的文字就是这么样,不喜欢。故事很《读者》之类的。
|
79 |
+
1 理论系列,比较深,需要常读。。。
|
80 |
+
0 实在不是我喜欢的文体
|
81 |
+
0 我当时准备买这本书时,看豆瓣评分9.5,于是就买了这本书.最近看完才发现,完全不值得这么高的评分.翻译还算凑合,一些名字,图片的和正文的不符合。内容不是很好,就是讲了英格兰的国王和一些伟人的算是历史的故事,算不上英格兰简史,我看可以称之为英格兰著名人物史,一些个人认为比较重要的事件都没有谈到,或涉及很少.如果想打发时间,就当读故事看看得了.没啥可读性.就是书的纸张、印刷挺不错的.以上为个人观点.
|
82 |
+
0 多么苍白无力的剧情,脸谱化的主角,幼齿的语言,我怎么会看了这么本书,killing time的资格都没有。。
|
83 |
+
0 只看了其中的《趁生命气息逗留》一篇。按照这么长时间以来养成的读书传统,照理说对大师的作品要保持相当的尊敬的。即使不欣赏,也只是把这评论放在心里,甚至在看到大多数人对书的的评论都相当高的时候,会偷偷想是不是自己素养没达到,所以欣赏不了。 好吧,我承认《趁生命气息逗留》读后感觉一般,打两星。不过这不代表这书不好或是对大师的不尊敬,仅用于积累喜好数据使豆瓣给我更好的推荐适合个人口味的书之用。
|
84 |
+
0 能读完也是蛮佩服自己的……Twentine最烂的小说
|
85 |
+
1 誠之先生一生所讀史治學心得皆囊括於中。
|
86 |
+
0 读完以后���第一个反应是:额(⊙o⊙)…就这样?现在出版社为了卖书真是什么都敢写啊,让整个欧洲怦然心动的小说?!如果真是这样,欧洲人民的阅读品味也太……哎,像这种没有内涵,阅读快感又不足的作品真是少读为妙。
|
87 |
+
0 弃了
|
88 |
+
0 看不下去,因为作者是文艺教授,所以书写的好学究的,而且也是一个老掉牙的故事,书中总说女主很慌恐,有稳定的职位,完全可以另寻他人,非搞得要死要活,不过也佩服作者,这么简单的故事可以写15万字。
|
89 |
+
0 格局太小,偏向于情感,不是特别易读。
|
90 |
+
1 几个典型的剪辑分享
|
91 |
+
1 没达到我对切的要求,嘿嘿。
|
92 |
+
1 上课看 看到p61 哭
|
93 |
+
1 特别像经济学中的一章,新兴经济学。第一次知道防策略性的重要,总体来说,对每个人都好的决定,却不一定稳定且可实现。全局观还是有一定限制性的,全局中的个体都是追求个人利益最大化的。
|
94 |
+
1 冷热交替的阅读感
|
95 |
+
0 12岁看的……18岁的时候才有些理解了那种感觉……但仍然缺乏对这种文的感受器……摊手……
|
96 |
+
0 别。开头儿看得到结尾。主角比读者蠢,搞咩啊。。。
|
97 |
+
0 肤浅做作,读过一遍,够了。比源泉差太多了。
|
98 |
+
0 淡淡的忧伤,忧伤到蛋疼。
|
99 |
+
0 真失望,毫无新奇特色
|
100 |
+
1 先前读英译本尚有若干细节读得迷糊,在这本里得到满足~~没有炫丽的文采和技巧,没有玄妙的结构,就是踏实的叙述~~但是,波拉尼奥是讲故事的天才,篇篇讲得暗流涌动,篇篇喜欢!比较奇怪的是,同样的赵德明的翻译,《荒野侦探》和《2666》我读了十页就读不下去,这本尽是短句,读起来十分畅快,而且据说从西语直译,和英译本的调子竟然也八九不离十,刮目相看~~~《荒》和《2666》的中译改天买来再试着读读看~~
|
101 |
+
0 不要误会,我不是针对大冰的这一本书,我是说大冰的所有书,都是垃圾。
|
102 |
+
1 好书,改革的这几十年
|
103 |
+
1 经典之作!犀利的视角,绵里藏针的笔触
|
104 |
+
0 不知道好在何处,难道就是因为是创办了PayPal?
|
105 |
+
1 人道主义和国家利益的博弈。“忽视历史的人更容易成为历史的受害者”
|
106 |
+
1 1.好多人没听过。 2.写的不错。
|
107 |
+
0 看了之后感觉失望
|
108 |
+
1 很奇怪地,读书时,眼前像看书中所道之画面,耳畔如作者在娓娓讲述。很久没读到这么好的书了。
|
109 |
+
1 看来看去,最喜欢看的还是港片。嗰书几好睇
|
110 |
+
0 还挺红的小言,真谈不上好看
|
111 |
+
1 香蕉鱼家族
|
112 |
+
1 通过法拉奇的描写,看到了不一样的好莱坞明星们的生活
|
113 |
+
1 重复博弈开始看不懂。看懂的部分对思维的启发和拓展都很大。
|
114 |
+
0 这本书太慢热,我实在坚持不住。
|
115 |
+
0 性价比低 排版用纸少美学 总之不值这个价
|
116 |
+
0 火车上读的,很水
|
117 |
+
1 没找着英文pdf又不想花钱买(……),于是看了中文pdf。好像少了最后一章,不过这本书看到后边让我觉得非常疲劳非常千篇一律,就也不是很care少了的那一章了。。。
|
118 |
+
1 里面说到的一些金融模型什么看不懂。。太专业了。。不过这本书还是蛮有意思的~看着看着会想到乌合之众里面讲的,群体的智力有时候不及个人啊,即使是那些华尔街的天才们。都是被钱障目。
|
119 |
+
0 不止针对本册书,针对全文的评论 开头我真切的怀疑女主真的是国安局上将,为衬托男主的腹黑各种低智。文章宠得时候又太腻太甜,许多情节都凑在一起重复发生,如果穿插起来会是好文。 对于人物的刻画并不具体,大都是虚无缥缈的气质,强大的气场,对于人物的描写太单薄,仅仅突出人物的某一突出特点夸饰却不用具体事件表现,甚至有偷换概念嫌疑 里面作者传达的主观思想借配角和主角的心理描写亦或旁白表达太明显,人为的定义哪个人的对错善恶好坏,而不是给人以思考空间,盲目推崇主角,跟主角作对的都没有好下场,跟主角站在一边的都有好结局,弃“暗”投“明”的也最终获得幸福。对于人物心理的刻画是硬伤,本来以前所作所为都是装的就很牵强(三分真七分假才不会被对手察觉)所思所想都被推翻的小说大概称不上一波三折
|
120 |
+
1 越来越精彩了,人物刻画相比前两部好了许多,作者对历史与文学的掌握和运用相当牛,添彩不少!期待大结局
|
121 |
+
0 胡乱引用,文不对题,不知所谓,比秦风老照片系列差远了。
|
122 |
+
0 这是拿推理小说当爱情小说写,其实绝大多数谋杀都是因为钱,为情极少,作者肯定是个腐女,把男性之间的感情写那么肉麻,完全是一种迎合而已。
|
123 |
+
1 2 每次读这种电影类书籍的时候才会发现自己的观影量有多么的匮乏,还需慢慢积累。
|
124 |
+
1 黑得漂亮
|
125 |
+
0 一句话:苏联上层精英背叛了马列正统思想,倒向了资本主义,因此苏联解体了,社会主义制度崩溃了。学过社会科学方法论的都不应轻易接受这种解释,比如:为什么戈尔巴乔夫的渐进转型和改革失败?苏联哪些上层转变了态度,为什么会转变态度?又在现实中有什么影响?普通民众没有影响吗?书中逻辑矛盾和问题大面积存在。
|
126 |
+
1 无论从装帧,翻译还是书本身都值得给足五颗星。那条书评说得有道理,从某种意义上说,书里的文字有包豪斯的共通理念。
|
127 |
+
1 我的笔为此而从某个时候开始跑起来,向着他跑去,它知道他不久就要到来。
|
128 |
+
1 确实没有多少国家能像俾斯麦的普鲁士一般,光辉灿烂地走上毁灭。而且哈夫纳的笔尖,如同跳跃的音符,谱写出一曲悲壮的普鲁士覆亡的篇章。来时何其壮哉(高度理性,纯军事化,三无政策的国度),去时柔风细雨(威廉二世乐得做德意志皇帝,而不愿提及普鲁士国王,也逐渐让普鲁士融入于大德意志民族之内)
|
129 |
+
1 文笔细腻 内容考究
|
130 |
+
1 可以省去不少本因被浪费的时候
|
131 |
+
1 赞理性和严谨。也赞死也不愿考虑政治因素的执着。
|
132 |
+
0 很讨厌啊。
|
133 |
+
0 文笔太弱了
|
134 |
+
0 文不对题,读不下去。
|
135 |
+
0 对于我来说,一本书,第三类的配角的性格不能乱改,要改必须得给个理由。比如,一个情节里面,路人的性质就是路人,他没其他风格。这书里面一个第三类配角的性格,它换了两次,我受不了!
|
136 |
+
0 內容空洞無病呻吟,與文化積累較深的文人寫的行記無法比較,一小時就看完的書,除了裝幀精美,實在無可贊之處,似乎是一個初中生的日記般
|
137 |
+
1 亚马逊11RMB 朱伟说昆德拉是三流作家是不是有点过分呢?
|
138 |
+
0 没有南极中的细节展现好,而且风格过于冷峻了。。没有个人特色。。读来乏味
|
139 |
+
1 多一颗星是因为这是一百年前的认识,特别是附录1的价值。
|
140 |
+
1 终于看完这本书了。书里描写的是那些被生活折磨着,爱情已经死去,每天只是鸡零狗碎的重复,时刻处在崩溃边缘的人。我觉得还得多看几遍,是因为我现在读起来还很吃力。
|
141 |
+
1 2017读过的top3之一,从宇宙万物混沌虚无到现在这个时点,所需的每一次进化都是一个偶然事件,读后会觉得能生存在此时此地是多么有幸。全书的结构清晰,内容详细,很适合对细节十分考究的读者,虽然此书价格小贵哈哈,但读后绝对一本满足
|
142 |
+
0 只读了开头,翻译的太差了……
|
143 |
+
1 超级霹雳无敌金刚五星。看电视剧都看不懂,我说为啥我总是觉得我懂得多,原来小说里有画外音有心理描写有评论。我真是一个愚昧的人啊。。。
|
144 |
+
1 罗杰斯到底是有多爱中国
|
145 |
+
0 实在看不下去不是我口味
|
146 |
+
1 大赞!
|
147 |
+
0 知青奇幻式的片子,书呆子棋王在逆境中爆发无尽的魅力,小说最后不了了之。电影则完全变成另一种模式,尤其是炮二进三,马四平八的大棋盘对弈,气势极佳。电影远比小说好,小说毫无所谓“多层次性”,糟糕得很,吹捧而已。
|
148 |
+
1 没有完美的公司,终有科学的管理。不愧为复旦新闻系科班出身,功底扎实。
|
149 |
+
0 唯一能确定的是,卡佛很极其熟悉酗酒中人的状态,但这,连带整本书,也算不得一丝半点真正的意义
|
150 |
+
1 读完了,蛮有收获的。初级入门的工具书吧~
|
151 |
+
0 骗钱的吧
|
152 |
+
1 不要用眼睛去看好莱坞,而要用欲望、嫉妒、想象去看它。
|
153 |
+
0 写的太白痴了。。。。
|
154 |
+
0 小天真
|
155 |
+
1 太好看!
|
156 |
+
0 莫非法国人的法语水平都堕落了?
|
157 |
+
1 我读的是中国电影出版社2000年的版本
|
158 |
+
0 《偏执狂》 女主照顾瞎眼男主成功上位。。。
|
159 |
+
0 超级失望,完全是个拼凑卖纸的玩意。
|
160 |
+
0 电视剧太拖沓了就花了两个小时找了电子版来看,如果这就是亦舒的代表作那她写得也并不好啊。套路太简单了,也太模式化了。性格转变都很突兀。不过呢,比电视剧版子君还是好很多啊………………难怪大家看到电视剧版子君要气死吧。。。
|
161 |
+
0 如果说Lonely Planet是旅行者的圣经,这本一定是砸牌子的。 英文原文应该还不错,但半吊子的翻译使得它只剩参考价值了,没错,仅供参考。菲律宾那本倒是很好用,至于日本,看书还不如自己上网站查。
|
162 |
+
0 N年前看过,勉强感觉还不错,现在知道是抄袭呵呵
|
163 |
+
0 不说内容,只说制作。当当活动46折买的,印刷品质粗恶到令人发指。书口都是花的。印刷厂印这书居然用了五种质地各种差各种不一样的纸。油墨味极大极刺鼻。封面设计很好,但是制作实现时各种假,看照片还以为是布脊,其实是劣质铜版纸塑料膜从天糊到地。劣等商品差差差。
|
164 |
+
0 本来我是多热爱运动系少女的啊……看了电视剧再去看书。觉得电视剧真是升华了。小说里的各种白痴逻辑我居然看完了这也是醉了��😱😱😱
|
165 |
+
1 全新的知识
|
166 |
+
0 没有打动我
|
167 |
+
1 很好的教科书。
|
168 |
+
0 读了一百多页表示看不到支持读下去的动力吸引力,三星打得很宽容了,想想对这本书的预期这么高,这么个名声,要减一颗
|
169 |
+
1 真的很不错,里面很多案例都值得学习,而且明显读了之后感觉自己的思维方式有了一定的改变
|
170 |
+
1 关于对孤独的解读你如此清澈如此玩世不恭的诚恳道来
|
171 |
+
0 这本书是娘们看的玩意儿
|
172 |
+
0 就不加友情分了…
|
173 |
+
1 或轻松或犀利的文笔,介绍的很多是年代久远的片子,偶尔翻出来看看,也是很好的。
|
174 |
+
1 看完几乎所有是枝裕和的作品,再看这本书还是有所获得的,比如很多电影的分镜,比如奇迹本来竟是boy meets girl的故事,还好改成了现在这样。其他的部分,还是以周围人的评价和看法居多,是枝裕和内心的东西还是偏少,访谈访的也不深,不过树木希林那段还是蛮让人深思的。
|
175 |
+
0 夸大其辞
|
176 |
+
0 读了一多半。不喜欢。也没完全读懂。2014年2月读。
|
177 |
+
1 才知道,原来竺家荣译的是个“洁本”!!!!那这个版本是否完整?
|
178 |
+
0 太长了看不完也看不懂
|
179 |
+
1 讲述了分离联盟的存在对国家兴衰的影响
|
180 |
+
0 盛名难副 浪费时间 矛盾奖原来是个施粥的
|
181 |
+
1 还没看完。。。看起来满累的。。。
|
182 |
+
0 为什吗我看的都是这么殴的书!!!
|
183 |
+
0 实在读不下去~~
|
184 |
+
1 那是05年的寒冬,在大连图书馆馆藏部看的,当时看的真吃力。10年5月初第二遍开读,10年6月初读完。
|
185 |
+
1 长亭外,古道边,芳草碧连天;晚风佛柳笛声残,夕阳山外山。。。
|
186 |
+
1 毛蟹推荐的
|
187 |
+
0 写个男主是有多可怜!!!女主是有多蠢!完全就是来谋杀男主的哈哈哈哈哈!!!!
|
188 |
+
1 英文名<How China Became Capitalist?>更适合本书主题。全书讨论了诸如双轨结构、边缘革命、地方竞争等话题,梳理了共和国建国六十年来的经济政治变迁,有点像基辛格《论中国》的经济学版。
|
189 |
+
0 果然摘抄不可信啊!有些书做摘抄,一段一段拿来都很好看,合起来就不见得了……其实看题目就知道没什么趣味了吧……orz
|
190 |
+
1 第一次看这类书籍,还是很有收获的,第二三部只是简单翻了翻没有细看了,第二部更像练习册的答案,可以自己看完片子后对照书上写的,自己踩点是否正确。中文版翻译貌似很不好?有空再瞅瞅原版吧!
|
191 |
+
1 人类学结构下的边疆历史,强弱相生的观点的确有理且有趣,对中原北疆及东北的三分式解析,以及对掠夺式和二分法的政治策略解析也站得住脚。而且不同于国内学界对中原国家的非理性认同和隐约偏见及马列史观对历史进步性的刻意把握,采取对汉人朝廷脸谱化,转而对漠北及东北人物的详细刻画,五星好书。
|
192 |
+
1 看完标记。中间四星,第一本和结尾五星。
|
193 |
+
0 路内完蛋草了
|
194 |
+
0 TND,谁打了这么高的分,浪费感情浪费时间。弃!
|
195 |
+
1 之前在淡水的哪个小书店里站着看完台版了 最近小猴又送我一本儿陆版 新经典的礼物 开心 值得再读一遍
|
196 |
+
0 哼哼 哼哼
|
197 |
+
0 这逼装到了天际,男主是江西婺源的,哈哈哈,容我笑一阵…
|
198 |
+
0 给了2星,因为貌似我还没给哪本书1星。修正,曾经给《秘密》这种骗人的书1星。
|
199 |
+
1 第一遍硬啃,第二遍慢慢开始顺畅了,还是得花笨功夫。
|
200 |
+
1 年度最佳随笔集。杨老师已然大师风范。即使是风格随意不拘一格的随笔文,杨依然可以站在世界史的视野上结合中国近代以来的具体情况,讲述民族主义在中国的形成,选择与演变。论述有趣,史料选择恰到好处。值得一读。
|
201 |
+
1 还不错,但最后有些不喜欢~~
|
train/datasets/test_data/book_review/test.tsv
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
label text_a
|
2 |
+
0 我惊了,这些评论的人真的都看了这书吗?还是都是托.....
|
3 |
+
0 男主好渣!看的气死我了!
|
4 |
+
0 大概初中及初中以下水平读者会把这奉为神作吧。今何在构建作品里抹不掉的中二气。
|
5 |
+
0 编辑肯定约稿时候反复提及以“艳遇”为中心了。然后作孽般的拼凑成这么一本破书。都神马水平啊这。
|
6 |
+
0 我是读不下去了……
|
7 |
+
0 严重过誉,斯蒂芬·茨威格这历史观以及叙事的方式和内容,就好像在看现代人拍的古装剧。最无法认同他将历史进程归咎于某些人的一念之差
|
8 |
+
0 太不真实
|
9 |
+
0 手贱的代价。神马傻逼玩意儿
|
10 |
+
0 算......黑历史吧......= =
|
11 |
+
0 一向的风格。懒得说了。
|
12 |
+
0 一开始还挺好看的,可是后面就烂尾了…不停地分手在一起分手在一起……还穿越…唉
|
13 |
+
0 不懂为什么分这么高 我觉得这书的内容真的很一般
|
14 |
+
0 如果不是宣传吹嘘得太夸张,和预想相差甚远的话,或许我给的是三星。失望是最可怕的。
|
15 |
+
0 不多说了。
|
16 |
+
0 看完有一种杀了作者的冲动。这年头是什么人都可以出书了么,三联你要不要这么砸自己的牌子啊!
|
17 |
+
0 翻译之差,无出其右
|
18 |
+
0 当初看的网络连载版,说真的这文到后面越来越崩坏,谁能告诉我女主这么穿越的意义究竟何在?虽然出了大事故,但原来的生活很美好啊。如果走穿后的那一条线也就算了,可以理解为想让男女主早点遇见,可问题是后面又回去了是要闹哪样?感觉此作者的作品没有以前的好看了
|
19 |
+
0 yy得太过分了,文笔普通,还披着软文伪网游的皮……………………又一个得分很高实际很烂的瞎文= =。
|
20 |
+
0 浓浓的“故事会”既视感,这就是本书给我最大的感觉。第一次读马伯庸的书,所以之前不了解他的笔触。在本书中,说的是什么呢?一个普通的动物园故事?一个传教士的故事?一个关于赤峰和其信仰的故事?还是说底层百姓的愚昧无知的故事?我不知道。(有人把这比成赤峰的少年pi,我倒是没想到)。故事在紧要关头,用奇幻色彩来过渡;在一些转折点,则一笔带过,感觉纯粹是为了写故事而写故事,这不就是一个故事会的超长篇小说吗?这个感觉非常相似,也算是说明在我读来,文笔和内容都稍显简单。也可能是我对赤峰不够熟悉,或者读得不够透彻吧。anyway,总体感觉一般。
|
21 |
+
0 什么鬼。。
|
22 |
+
0 姐夫和小姨子,三观感人。而且模仿吴侬软语看的人好累,不太好看,文笔也一般,属于想要模仿匪我思存但非常失败的类型。
|
23 |
+
0 王朔、石康、冯唐,文风和题材差球不多,才华却一代不如一代……
|
24 |
+
0 絮絮叨叨 磨磨唧唧 啰啰嗦嗦,简直就像一部乡下老太太的流水账!没有逻辑 没有重点,对于部分精彩的故事,例如第四次东征和对抗蒙古人的东征,竟然也只用一两页纸就说完了?!你如此简练的文笔为啥要写出400多页的长文?!对于一个对十字军有兴趣的读者来说,这本书实在不适合阅读,差评!
|
25 |
+
0 终于看完了 其实我觉得好没劲呀 要不是因为lp的名声我绝对不会耐着性子把它看完的
|
26 |
+
0 诚然,这世界有着极致的阴暗,光明才更值得被追求。
|
27 |
+
0 书名很吓人,但这只是其中的一个短篇。一本不温不火的私人回忆录,无感。
|
28 |
+
0 因为喜欢封皮的黄色和女主角直子的名字而买了这本书,可是开头看了20页就实在看不下去了。是多么自负的作者才能写出如此自负的男主人公呢,是想写出男主的器宇轩昂,还是就是想塑造这么个雄赳赳的男主来衬托谦虚的女主,总之我是没看下去,试了好几次,每看一次心中就对男主产生极度的厌恶之情,最后这本新书还是送人了。如果是单纯的想看游记的话,这本绝对是下下之选,当小说看也不推荐,总之,差!
|
29 |
+
0 越看到后面越受不了,你说从头和一个男的虐到尾不挺好的?
|
30 |
+
0 李敖的史观旧得想吐槽都无处下嘴。
|
31 |
+
0 难怪被喷,完全就是把新闻改编一下,打上小说的旗号。
|
32 |
+
0 不是很喜欢
|
33 |
+
0 看简介说宏观经济学理论和政策的颠覆之作。以人的非理性行为来解释经济,我兴致勃勃看了,仿佛进了餐馆打算吃顿大餐,前菜麻麻,我忍耐着吃了了,以为终于可以一尝正餐,结果它撤台了。一本书可说大半是废话,咂咂嘴感觉吃了顿西北风。
|
34 |
+
0 这本在线即时翻译型译文毁原著。希望其他出版社重新出译本。英语语法的从句是最不好翻译成中文的,一旦按照英文习惯断句,就会存在没主语、或者主宾混乱的情况。
|
35 |
+
0 这本的翻译看得真不是一般的别扭,倒不是文句不通而是归化腔太重了,实在看不习惯一股京片子味道的日本散文。文章内容本身也相当散乱,而且除了打火机和蜡像那两篇之外毫无作者特有的那种风格,只觉得是没什么实质内容的中国二流杂志随笔而已。
|
36 |
+
0 无奈仍是无奈,疲惫仍是疲惫,原封不动。
|
37 |
+
0 对白太拖拉。。比凤囚凰差了好多。。弃了,看了一半不到吧
|
38 |
+
0 发现中国的小说家真的特别爱用高频次的对话来推进情节,因此对人物的理解也显得非常轻浮和表面,叙事几乎没有“停下来”的时候。而且这样一个很流水账的故事只是复刻了余华在活着或许三观卖血记中的苦难世界,属于自己的风格在哪里?用了很短的时间就几乎通读完全书,说实话,对于能读得这么快的长篇,向来警惕。
|
39 |
+
0 两星实属无奈。虽然女主差强人意,可是男主男配和故事都不差。然而!!作者实在是太爱卖弄文采和诗词了,觉得行文不够连贯,让人厌烦,看起来累得要死。对作者文笔真没好感,不然起码三星吧。
|
40 |
+
0 我一直觉得这书的出版是因为肥猪流的兴起
|
41 |
+
0 翻译很烂,读不下去,以后有空读读英文版的吧。
|
42 |
+
0 我想我一定没能看懂小说背后的意思~~~
|
43 |
+
0 很乏味
|
44 |
+
0 跟大马士革也就半毛钱的关系吧~宣传软文写得精彩正篇灰常1般,人物名字起得实在太土影响阅读...于是不耐烦地翻完了~
|
45 |
+
0 看封面和书名本以为是一本治愈系作品,没想到却是一本暗黑故事集,虽说我承认作者的脑洞很大,但文笔远远跟不上他的脑洞,整篇作品文风矫揉造作,为显示逼格如呓语一般不知所云,充斥着一股非主流的味道!
|
46 |
+
0 可不怎么好看
|
47 |
+
1 那时候县里有一个借书的店,一毛钱一天。借过的最喜欢的一本。
|
48 |
+
1 最大的新颖就是其不同于传统史学的论述视角,不仅没有为现代学术诟病的正朔之争,更摆脱了以中原王朝为视野立足点的习惯,纯粹地以北方草原为本位,贯通式地鸟瞰了整个古代草原世界,是一本通史型的著作。尽管在细节的讨论上仍然存在着太多可商榷的地方,但弥足珍贵之处在于巴氏给出了一个全新的认识角度。很多评论总要提及所谓的人类学视角,其实细看可以发现,书中并没有对人类学中的族群、族团做过任何的分析,哪怕民族认同这样的共同体标志也未被提起过。可见,巴氏的分析更多地还是基于历史学构架,期望通过区分主观史学与客观史学的分野来达到认识的新途径。
|
49 |
+
1 作为强迫症,p256注释2“雪山女生之子”这个很想改过来。
|
50 |
+
0 论见识和文学能力,此书都很差劲,如果说这本书背后代表的意义更重于书本身的内容,我是信的。只是,还是不值得给两颗以上的星,若是早半个世纪,恐好评如潮。
|
51 |
+
0 没有故事性的小说即使包裹在再华美的文字下也是苍白,何况整本书连华美的文字都没有。我宁愿回去重新复习誓鸟和水仙!
|
52 |
+
1 经历过那样不忍述说的灾难,回家那条漫长的,充斥着死亡、流离和痛苦的道路,在作者笔下竟然像被镀上一层带有暖意的阳光,有信仰、勇气和爱,真心难得。
|
53 |
+
1 一个人的旅途 这会是好的选择
|
54 |
+
1 耐读。导演很坦诚。还以为鳗鱼和楢山节考会有大篇幅的拍摄细节讲述,结果只是随笔一提,挺纳闷~
|
55 |
+
1 我以前从未意识到德国的统一意味着普鲁士光辉灿烂地走向毁灭 这世界是你的遗嘱 而我是你唯一的遗物 其实我真正想看的只有最后四分之一 可以脑补万字独普独虐文了
|
56 |
+
1 真的是一本巨著!虽然我自己更喜欢的是荒野侦探,但是2666有一种让人无法忽视的伟大。
|
57 |
+
1 就看在這麼刷三觀的份兒上什麼書寫排版上的毛病都不是問題。。。
|
58 |
+
0 这部不好看,第一部惊喜,第二部一般,能看完,第三部看了一部分再也看不下去了。
|
59 |
+
0 读了几页就读不下去的...矫情
|
60 |
+
0 正好在接触残疾人补贴事物,发现扶贫是个长远工程,非单纯给钱就能完成。
|
61 |
+
1 “如果我们最后只能在地狱城上岸,我们有两种避免受苦的办法,对于许多人,第一种比较容易,接受地狱并且成为它的一部分,这样就不必看见它。第二种有些风险,而且必须时刻警惕提防:在地狱里找出非地狱的人和物,学习认识他们,让它们持续下去,给他们空间。”
|
62 |
+
1 混乱的世界与和它一样混乱的青春
|
63 |
+
1 谢谢黄同学赠书
|
64 |
+
0 铺垫的太多了看到一半实在看不下去 情节能不能紧凑点
|
65 |
+
1 可以说是资本论的通俗版,因为通俗丢失了一些东西,但也因此更容易让人看懂,好书
|
66 |
+
0 这书真是无趣到了极点……
|
67 |
+
1 英文书还是读原版。与保尔森、伯南克的书互为印证
|
68 |
+
0 看了等于没看,还浪费时间的书
|
69 |
+
0 虽然还是在胡说,但程度上似乎比前两部少了一些。
|
70 |
+
1 大部头的小说
|
71 |
+
0 这类充满灰暗内容、负能量的小说都应该扔垃圾堆里烧了
|
72 |
+
1 读了两遍都没��懂。。。。
|
73 |
+
0 所谓的闲笔或者自谓的这种也可称之为的“叙诡”于故事本身是毫无作用的,炫技有时不到点上确实很难堪
|
74 |
+
0 垃圾中的战斗机
|
75 |
+
0 白送我都不稀罕。
|
76 |
+
0 其实观点还阔以,非得他妈不说正常人话,拽个鸡巴
|
77 |
+
1 深入浅出,结合史实、材料的学术文,四星起
|
78 |
+
1 看的是译言网的中译版本,极大满足了我的猎奇心理啊。从外国人的眼光看中国社会果然不一样
|
79 |
+
1 有太多的思绪而至内心更加空洞,最后尼采透露出的一部分自我让读者和他自己都得到了救赎,有人说,人是为了幸福而活,人永远需要爱,也需要付出爱,而哲学似乎不会给予该怎么生活的答案,就如尼采所诉,这条路只能自己寻找。无论对自我的局限走到哪里,只要做出选择,就去爱这选择,虽然不会被治愈,但至少被慰籍。
|
80 |
+
0 前半部还比较好,后半部就有点不紧凑。
|
81 |
+
1 虽然写的比较浅,好歹让人对大中东有个基本概念,另外图文并茂的看着也舒服
|
82 |
+
1 读了一半,还行吧,没有当年《奇迹的黄昏》带给我的震撼大。作者的叙述总有一点悲天悯人的调调,本质上来说,我是一个乐观的人,所以看着有点憋屈。当然作者的观点,给我很多启发,但是论述的逻辑性尚缺严密。
|
83 |
+
1 首先看的是伊伯特的纪录片“人生如戏”,后来一直想找来此书一读,奈何过于火爆,至今日才在图书馆预约到。多每部电影的影评,提及的电影也大都看过,但共鸣点委实不一样。更觉哈姆雷特不止一个,和经历观念太相关了。许多更伟大的电影未提及,也算遗憾。
|
84 |
+
1 过程像行尸走肉,结局像迷雾
|
85 |
+
1 言简意赅,处处都是精华
|
86 |
+
0 太无聊的阴谋论。
|
87 |
+
1 一口气读完 很喜欢。尤其是这本书里关维兴的插画 很喜欢。
|
88 |
+
0 补记/看不下去啊~
|
89 |
+
1 纸上谈兵的截图指南,推荐。
|
90 |
+
0 粗读。都是观点,没有数据和事例。
|
91 |
+
0 2017-15 未雨绸缪四个字说得那么复杂
|
92 |
+
1 好畅快
|
93 |
+
1 入门之作
|
94 |
+
0 天马行空,欣赏不来
|
95 |
+
0 我想了一下这本书被大力推崇,乃至"改变了对明朝的固有看法",然后就是推什么天子守国门之类的.在这类读者看来,气节之类是最重要的(说起来明朝最讲气节的几个反而不是真的气节),至于锦衣卫,东西厂之类则被视作不存在.晚明的颓丧糜烂风气也被视为一种明朝活力的象征.无论如何吹,明朝绝对不是几个长期王朝里最强的,甚至宋也算不了.唐朝的强在于儒教在此一朝只是多元中的一部分(中晚唐儒教重起,儒生又无法控制藩镇武人,则唐归于毁灭)
|
96 |
+
0 这书我看了一年还没看完,断断续续,弃了看,看了弃,实在是无法一口气读下去。。故事是个好故事,可是文笔也太啰嗦了。。。
|
97 |
+
0 非常令人失望
|
98 |
+
0 死亡让一切平庸显得伟大;这叫做尸体的浮肿。
|
99 |
+
1 没有耐心的就不要读了,完全没有八卦好么
|
100 |
+
0 飞行读物。翻译的不错很细腻。也从书里看见自己虽对前路迷茫也不会沉浸在过去了。
|
101 |
+
0 越读越涩,涩到煎熬
|
102 |
+
0 佩服作者的观察力和毅力,但是一点可读性都没有,干巴巴的。
|
103 |
+
1 此书的靠谱程度和好莱坞每年出产的烂片数量形成了奇妙的对比。
|
104 |
+
1 案例值得深思
|
105 |
+
0 看到这样的文我就觉得网络文学的质量果然还是正常的
|
106 |
+
1 写史一绝,没有矫揉造作,没有铁血刚硬,可以深深地体味对历史的敬畏和温情,文字辛辣,却没有语言的偏激,这其中包含着文人墨客对中国政治自古的追索,让人心酸,让人欣慰,值得铭想。
|
107 |
+
1 很容易看下去,简单但是有深度,与历史结合的写法非常有趣
|
108 |
+
1 比较全
|
109 |
+
0 想法不错,一上来还挺惊艳的,越看越不行,七拼八凑现学现卖的感觉,且文风恶俗,从头到尾毫无节制地贫嘴
|
110 |
+
1 爽。
|
111 |
+
1 ……读完整个人魂灵都处在被勾走的状态QAQ 2001库神影像已先入为主了 想给2001一万颗星TvT(“God, it's full of stars!!!!!”(。 神一样的思维设定基础 觉得2010可读性最强(……虽然被电影作用了从土星转成了木星=。= 2061有点水 3001就真的……唉说不出话了QAQ “你我所拥有的自由,只怕比想像中还要少。”(btw 这真的是目前最棒的生日礼物QAQ
|
112 |
+
0 真的很一般啊
|
113 |
+
0 矫情逼
|
114 |
+
1 毁城记
|
115 |
+
0 零散的不成体系;自以为是的抒情 鸟人/法国中尉的女人/一个男人与一个女人/骗术大全/美国往事/罗拉快跑
|
116 |
+
0 难以做出评价,很多不认同,so
|
117 |
+
0 流水账一般
|
118 |
+
1 作者对历史很有了解,小说夹杂历史,难得的好作品
|
119 |
+
0 同学的书 凑合看完
|
120 |
+
0 女主的性格倒不让人讨厌,只是看的时候心里总有个声音在说:“哪有这样的人。”很是心烦。东方非和凤一郎倒是很有意思,你们在一起吧。
|
121 |
+
0 故事还可以,但感觉情节不够丰满,很多东西,都没清楚��代,有些转折有点生硬。文字感觉也比较一般。不是特别特别特别多时间的人不推荐阅读。
|
122 |
+
0 真真切切的流水账,青春回忆+黑道+高干+富二代+绝世美颜(男主姐,法拉利家族二儿子拜倒裙下)+娱乐圈+天上人间+... 狗血再来得猛烈些吧!这真的是写《匆匆那年》的九夜茴么?就这还写了将近四十万字上下两册?作者神了, 出了这么多本书,还能来这么一部跟处女作网文一样的作品,真的不是把当年的出道作品拿出来充数了么?真的不是为了交差应付凑了一本?真的不是枪手在写大学开始的故事么?给跪了。留着的两星给幼年部分。一些对话和情节能透出纸张让人尴尬。
|
123 |
+
0 我不明白他们为什么这样迷信野史
|
124 |
+
0 唐七的小说除了那看似华丽的文字外,还有什么?读了唐七的华胥引和三生三世之后,我忽然觉得郭四娘的小说真是太特么好看了。唐七的小说和他的笔名一样,充满了装逼的气息。角色塑造也几乎和华胥引完全一样,两部小说的男主都是能力强悍的高冷系——装逼者的最爱,而女主则都是中二病带点转了几世的小忧伤......呵呵,您能稍微突破下自己,多少有点创意吗?
|
125 |
+
1 感觉很无聊~但是刚推出是看豆瓣评到8.多就下手了
|
126 |
+
1 希望我看的不是删节版的。
|
127 |
+
0 已经不能读中文小说了,太难看了,尤其是里面对女性角色的刻画,真是丢人。
|
128 |
+
1 「即便你是個僧侶,像我哥哥那樣,盛怒或屠殺也會在某天找上門來。因為沒有社會的存在你也無法以僧侶的身份生存。你摒棄社會種種,但要達成這一目標必須先成為它的一分子,從中學習並做出決斷。這是歸隱的悖論。」 / 四五年前第一次接觸到這本書的時候,是台灣譯者的翻譯,書名叫《菩薩凝望的島嶼》。前段時間朋友在力推這本書我以為是翁達傑的新書,之前並沒有注意到英文名,後來一查,才知道是同一本。但無論是哪個翻譯,都覺得蠻好的。幾年後再讀是另一種完全不同的感覺。我不得不喜歡那個「買」了她哥哥的名字 ——「安尼爾」的女法醫人類學專家。
|
129 |
+
0 越写越恶心 妈呀 这样的女主 太长的文 必然问题颇多
|
130 |
+
1 经常看到默默流泪……
|
131 |
+
1 2015年读完的,还行吧!
|
132 |
+
1 很喜欢,再多读几遍
|
133 |
+
0 蒂莫西·加顿艾什的《档案》《事实即颠覆》都很差!
|
134 |
+
0 这种意识流的看点在哪?
|
135 |
+
1 感觉自己被祖伊训的狗血喷头 但结尾有点马虎啊 不能拿大哥作幌子啊
|
136 |
+
0 这算哪门子的纯文学
|
137 |
+
0 行文过于流水帐,不推荐阅读
|
138 |
+
0 其实除了一点历史之外看不出男女主有什么时代差异
|
139 |
+
0 原来以为还可以,看完发现相当无聊。
|
140 |
+
1 与其说是创业的国度,不如说是创新的国度,可是一个国度的创新背后往往是它一路走来,甚至永远摆脱不掉的民族血泪。所谓多难兴邦,或许也有这层意味。
|
141 |
+
1 一百个赞
|
142 |
+
1 奇遇的重演
|
143 |
+
1 创新是这个世界保持增长的根本原因,创新需要知识技术及载体(人类和人工智能)。我们这个世界与几万年前人类祖先智人生活的世界的不同之处在于物理秩序(信息)的不同,现在处于信息爆炸的时代,不同的知识技术储存在不同的个体、组织中,要想持续不断地创新就要扩大我们的社会关系网络,以获得更多的知识技术和计算能力,所以,链接越来越重要。
|
144 |
+
1 宋朝部分写的太好了
|
145 |
+
1 印象很深的就是罗斯福想要帮助英国抗衡德国,每一步都很艰难,要说服国会说服人民,反观日本德国,希特勒,军部说一不二,制度的差距如此大。
|
146 |
+
0 好脑残的,诧异能印出来
|
147 |
+
1 专业影评写作指南
|
148 |
+
0 屁话多!!就是说这个好,这个重要。一点都不深入,中信居然给出版了,噱头大啊!!第三次工业革命并不仅仅围绕能源,也不仅仅是核电风电光伏!!
|
149 |
+
0 开篇便是甩掉你的坏旅伴,但后面就是一个complain的集合体,最后再凸显一下我能忍受着一切,牛逼吧。看来看去,整本书写的最好的部分是写给自己旅伴的孩子的信。也许真人是个有趣的人,但落实到文字上就算了。
|
150 |
+
1 对俺来说有点专业,但是确实对了解黑色电影很有用
|
151 |
+
0 俗
|
152 |
+
1 儒家文明PK海洋强盗文化,尘归尘土归土,全球化的今天,谁都不可能幸免。中国人真的有必要一直勤劳勇敢朴素下去吗?不劳动真的那么可耻吗?今天的中国会有不同于100年前的命运选择吗?一起拭目以待吧。。。话说,希腊政府真是够可耻的!!
|
153 |
+
0 把先贤们都说过的事情拼凑一下,炒冷饭。有点无聊,最多2星……
|
154 |
+
1 被我发现了刊印错误……比较实用的一本书,但是写影评最重要的部分是对电影知识的了解和笔头功夫,所以练习更重要。
|
155 |
+
0 目的性太强
|
156 |
+
1 自己看过的编剧类书籍中,内容最完整的一本。它涉及的面更广,不仅是一本剧本写作指南,也是一个(好莱坞)编剧从业指南。
|
157 |
+
1 故事的主题是创新
|
158 |
+
0 玛丽苏,拉分数的我。这书名无语了
|
159 |
+
1 新晋男神。
|
160 |
+
1 我记得我有两本来着
|
161 |
+
1 很不错
|
162 |
+
1 看到毕业礼那里确实挺心酸的,小时候我也像弗兰西那样的爱爸爸。
|
163 |
+
1 似懂非懂。
|
164 |
+
1 这是我读的第一本钱先生的书,当初的那种醍醐灌顶之感如今还清晰可感,不管怎样,我算是读到了一些东西。
|
165 |
+
0 好失望~
|
166 |
+
0 看完觉得很恶心,不能认同作者的观点,怎么豆瓣评价这么高?
|
167 |
+
0 凉生,我们可不可以不这么作……
|
168 |
+
0 烂书一本。
|
169 |
+
1 很美丽的地方,喜欢作者摘的那些水果
|
170 |
+
0 不知道该说是资本主义的虚伪,还是人性的虚伪。
|
171 |
+
1 语言圆润 论点偏大众
|
172 |
+
0 动机真的是青春期中二症典型例子...
|
173 |
+
0 图表上的字太大,影响了图表的直观性。
|
174 |
+
0 描写知识分子们在新中国成立后第一次政治运动中遭遇和表现的小说,然而却仅仅停留在历史场景的复述和控诉上,缺乏深层次的批判和思考;写法手法上也没有太多的技巧可言。看完卡夫卡和昆德拉的作品,再来读这本《洗澡》,实在有味同嚼蜡的感觉。
|
175 |
+
1 知我者,谓我心忧;不知我者,谓我何求。 历史的误会。
|
176 |
+
0 原来《庶女有毒》换了个名字出版。非常长的宅斗文,女主金手指大开,初看非常爽,再看经不起推敲。听说是抄袭了一百多本书,可怕。仔细看的话用词和标点符号都非常不到位,很幼稚。李长乐的美若天仙和智商真是不成正比,蒋家五个最优秀的儿子全都死了,算计人和吃白菜似的。
|
177 |
+
0 你丫知道什么是纪事本末吗
|
178 |
+
1 街上那点事儿。翻译不错,可以一看。
|
179 |
+
0 好长好无聊,可怜的端午假期
|
180 |
+
1 依然如此吸引人,但是有些郁闷,感觉比呐喊更阴暗些。不知为何极喜欢魏连殳。
|
181 |
+
0 差。
|
182 |
+
0 最后直接叫朋友说了结局,没了闲情看下去。
|
183 |
+
0 一出闹剧。如果说秋菊是轴,那么李雪莲就是作。小说里的人名:王公道 董宪法 荀正义 史为民 储清廉,真是受不了这种赤裸裸的反讽。先不管小说写的怎么样,反正读起来感觉很不好就是了。。囧rz
|
184 |
+
1 终于搞清来龙去脉
|
185 |
+
1 高二时候读的,记忆深刻
|
186 |
+
0 非常差。各种不靠谱。没有什么当地的风土人情,只有作者貌似站的很高的感叹。自我感觉太良好了吧
|
187 |
+
0 电影挺喜欢的,故事也挺喜欢的,可是这本书的翻译啊,真的好敷衍
|
188 |
+
1 酣畅淋漓
|
189 |
+
0 我不懂评分为什么这么高,太扯了
|
190 |
+
0 就冲这出版社的社品。。。尼玛一星都配不上!!!
|
191 |
+
0 浓浓的直男癌口气…好恶心…
|
192 |
+
0 忘不掉过去,就很难得到现在和未来的幸福,那种抱着过去和现在死磕的人最讨厌了!
|
193 |
+
0 1.作者何必不断强调自己是乡下人?你若看得起自己 没人看不起你 2.我还真不知道星巴克里除了美式之外两杯咖啡才收37元钱 3.这感情真是你麻痹来的莫名其妙 4.这个和何以完全不是一个档次的 何以比这个好看一千万倍不止。
|
194 |
+
0 王朔也就一街头低端草泥马
|
195 |
+
0 完全get不到点的男女关系
|
196 |
+
0 倾向性太明显了吧……
|
197 |
+
1 曾经如此,此后不再。要记得。
|
198 |
+
0 烂!!
|
199 |
+
0 白左垃圾
|
200 |
+
0 废话太多 看到第三页就会忘了第一页说了什么 可能是我悟性太差吧
|
201 |
+
1 对侯导的作品又多了一些了解。
|
train/datasets/test_data/book_review/test_nolabel.tsv
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
text_a
|
2 |
+
我惊了,这些评论的人真的都看了这书吗?还是都是托.....
|
3 |
+
男主好渣!看的气死我了!
|
4 |
+
大概初中及初中以下水平读者会把这奉为神作吧。今何在构建作品里抹不掉的中二气。
|
5 |
+
编辑肯定约稿时候反复提及以“艳遇”为中心了。然后作孽般的拼凑成这么一本破书。都神马水平啊这。
|
6 |
+
我是读不下去了……
|
7 |
+
严重过誉,斯蒂芬·茨威格这历史观以及叙事的方式和内容,就好像在看现代人拍的古装剧。最无法认同他将历史进程归咎于某些人的一念之差
|
8 |
+
太不真实
|
9 |
+
手贱的代价。神马傻逼玩意儿
|
10 |
+
算......黑历史吧......= =
|
11 |
+
一向的风格。懒得说了。
|
12 |
+
一开始还挺好看的,可是后面就烂尾了…不停地分手在一起分手在一起……还穿越…唉
|
13 |
+
不懂为什么分这么高 我觉得这书的内容真的很一般
|
14 |
+
如果不是宣传吹嘘得太夸张,和预想相差甚远的话,或许我给的是三星。失望是最可怕的。
|
15 |
+
不多说了。
|
16 |
+
看完有一种杀了作者的冲动。这年头是什么人都可以出书了么,三联你要不要这么砸自己的牌子啊!
|
17 |
+
翻译之差,无出其右
|
18 |
+
当初看的网络连载版,说真的这文到后面越来越崩坏,谁能告诉我女主这么穿越的意义究竟何在?虽然出了大事故,但原来的生活很美好啊。如果走穿后的那一条线也就算了,可以理解为想让男女主早点遇见,可问题是后面又回去了是要闹哪样?感觉此作者的作品没有以前的好看了
|
19 |
+
yy得太过分了,文笔普通,还披着软文伪网游的皮……………………又一个得分很高实际很烂的瞎文= =。
|
20 |
+
浓浓的“故事会”既视感,这就是本书给我最大的感觉。第一次读马伯庸的书,所以之前不了解他的笔触。在本书中,说的是什么呢?一个普通的动物园故事?一个传教士的故事?一个关于赤峰和其信仰的故事?还是说底层百姓的愚昧无知的故事?我不知道。(有人把这比成赤峰的少年pi,我倒是没想到)。故事在紧要关头,用奇幻色彩来过渡;在一些转折点,则一笔带过,感觉纯粹是为了写故事而写故事,这不就是一个故事会的超长篇小说吗?这个感觉非常相似,也算是说明在我读来,文笔和内容都稍显简单。也可能是我对赤峰不够熟悉,或者读得不够透彻吧。anyway,总体感觉一般。
|
21 |
+
什么鬼。。
|
22 |
+
姐夫和小姨子,三观感人。而且模仿吴侬软语看的人好累,不太好看,文笔也一般,属于想要模仿匪我思存但非常失败的类型。
|
23 |
+
王朔、石康、冯唐,文风和题材差球不多,才华却一代不如一代……
|
24 |
+
絮絮叨叨 磨磨唧唧 啰啰嗦嗦,简直就像一部乡下老太太的流水账!没有逻辑 没有重点,对于部分精彩的故事,例如第四次东征和对抗蒙古人的东征,竟然也只用一两页纸就说完了?!你如此简练的文笔为啥要写出400多页的长文?!对于一个对十字军有兴趣的读者来说,这本书实在不适合阅读,差评!
|
25 |
+
终于看完了 其实我觉得好没劲呀 要不是因为lp的名声我绝对不会耐着性子把它看完的
|
26 |
+
诚然,这世界有着极致的阴暗,光明才更值得被追求。
|
27 |
+
书名很吓人,但这只是其中的一个短篇。一本不温不火的私人回忆录,无感。
|
28 |
+
因为喜欢封皮的黄色和女主角直子的名字而买了这本书,可是开头看了20页就实在看不下去了。是多么自负的作者才能写出如此自负的男主人公呢,是想写出男主的器宇轩昂,还是就是想塑造这么个雄赳赳的男主来衬托谦虚的女主,总之我是没看下去,试了好几次,每看一次心中就对男主产生极度的厌恶之情,最后这本新书还是送人了。如果是单纯的想看游记的话,这本绝对是下下之选,当小说看也不推荐,总之,差!
|
29 |
+
越看到后面越受不了,你说从头和一个男的虐到尾不挺好的?
|
30 |
+
李敖的史观旧得想吐槽都无处下嘴。
|
31 |
+
难怪被喷,完全就是把新闻改编一下,打上小说的旗号。
|
32 |
+
不是很喜欢
|
33 |
+
看简介说宏观经济学理论和政策的颠覆之作。以人的非理性行为来解释经济,我兴致勃勃看了,仿佛进了餐馆打算吃顿大餐,前菜麻麻,我忍耐着吃了了,以为终于可以一尝正餐,结果它撤台了。一本书可说大半是废话,咂咂嘴感觉吃了顿西北风。
|
34 |
+
这本在线即时翻译型译文毁原著。希望其他出版社重新出译本。英语语法的从句是最不好翻译成中文的,一旦按照英文习惯断句,就会存在没主语、或者主宾混乱的情况。
|
35 |
+
这本的翻译看得真不是一般的别扭,倒不是文句不通而是归化腔太重了,实在看不习惯一股京片子味道的日本散文。文章内容本身也相当散乱,而且除了打火机和蜡像那两篇之外毫无作者特有的那种风格,只觉得��没什么实质内容的中国二流杂志随笔而已。
|
36 |
+
无奈仍是无奈,疲惫仍是疲惫,原封不动。
|
37 |
+
对白太拖拉。。比凤囚凰差了好多。。弃了,看了一半不到吧
|
38 |
+
发现中国的小说家真的特别爱用高频次的对话来推进情节,因此对人物的理解也显得非常轻浮和表面,叙事几乎没有“停下来”的时候。而且这样一个很流水账的故事只是复刻了余华在活着或许三观卖血记中的苦难世界,属于自己的风格在哪里?用了很短的时间就几乎通读完全书,说实话,对于能读得这么快的长篇,向来警惕。
|
39 |
+
两星实属无奈。虽然女主差强人意,可是男主男配和故事都不差。然而!!作者实在是太爱卖弄文采和诗词了,觉得行文不够连贯,让人厌烦,看起来累得要死。对作者文笔真没好感,不然起码三星吧。
|
40 |
+
我一直觉得这书的出版是因为肥猪流的兴起
|
41 |
+
翻译很烂,读不下去,以后有空读读英文版的吧。
|
42 |
+
我想我一定没能看懂小说背后的意思~~~
|
43 |
+
很乏味
|
44 |
+
跟大马士革也就半毛钱的关系吧~宣传软文写得精彩正篇灰常1般,人物名字起得实在太土影响阅读...于是不耐烦地翻完了~
|
45 |
+
看封面和书名本以为是一本治愈系作品,没想到却是一本暗黑故事集,虽说我承认作者的脑洞很大,但文笔远远跟不上他的脑洞,整篇作品文风矫揉造作,为显示逼格如呓语一般不知所云,充斥着一股非主流的味道!
|
46 |
+
可不怎么好看
|
47 |
+
那时候县里有一个借书的店,一毛钱一天。借过的最喜欢的一本。
|
48 |
+
最大的新颖就是其不同于传统史学的论述视角,不仅没有为现代学术诟病的正朔之争,更摆脱了以中原王朝为视野立足点的习惯,纯粹地以北方草原为本位,贯通式地鸟瞰了整个古代草原世界,是一本通史型的著作。尽管在细节的讨论上仍然存在着太多可商榷的地方,但弥足珍贵之处在于巴氏给出了一个全新的认识角度。很多评论总要提及所谓的人类学视角,其实细看可以发现,书中并没有对人类学中的族群、族团做过任何的分析,哪怕民族认同这样的共同体标志也未被提起过。可见,巴氏的分析更多地还是基于历史学构架,期望通过区分主观史学与客观史学的分野来达到认识的新途径。
|
49 |
+
作为强迫症,p256注释2“雪山女生之子”这个很想改过来。
|
50 |
+
论见识和文学能力,此书都很差劲,如果说这本书背后代表的意义更重于书本身的内容,我是信的。只是,还是不值得给两颗以上的星,若是早半个世纪,恐好评如潮。
|
51 |
+
没有故事性的小说即使包裹在再华美的文字下也是苍白,何况整本书连华美的文字都没有。我宁愿回去重新复习誓鸟和水仙!
|
52 |
+
经历过那样不忍述说的灾难,回家那条漫长的,充斥着死亡、流离和痛苦的道路,在作者笔下竟然像被镀上一层带有暖意的阳光,有信仰、勇气和爱,真心难得。
|
53 |
+
一个人的旅途 这会是好的选择
|
54 |
+
耐读。导演很坦诚。还以为鳗鱼和楢山节考会有大篇幅的拍摄细节讲述,结果只是随笔一提,挺纳闷~
|
55 |
+
我以前从未意识到德国的统一意味着普鲁士光辉灿烂地走向毁灭 这世界是你的遗嘱 而我是你唯一的遗物 其实我真正想看的只有最后四分之一 可以脑补万字独普独虐文了
|
56 |
+
真的是一本巨著!虽然我自己更喜欢的是荒野侦探,但是2666有一种让人无法忽视的伟大。
|
57 |
+
就看在這麼刷三觀的份兒上什麼書寫排版上的毛病都不是問題。。。
|
58 |
+
这部不好看,第一部惊喜,第二部一般,能看完,第三部看了一部分再也看不下去了。
|
59 |
+
读了几页就读不下去的...矫情
|
60 |
+
正好在接触残疾人补贴事物,发现扶贫是个长远工程,非单纯给钱就能完成。
|
61 |
+
“如果我们最后只能在地狱城上岸,我们有两种避免受苦的办法,对于许多人,第一种比较容易,接受地狱并且成为它的一部分,这样就不必看见它。第二种有些风险,而且必须时刻警惕提防:在地狱里找出非地狱的人和物,学习认识他们,让它们持续下去,给他们空间。”
|
62 |
+
混乱的世界与和它一样混乱的青春
|
63 |
+
谢谢黄同学赠书
|
64 |
+
铺垫的太多了看到一半实在看不下去 情节能不能紧凑点
|
65 |
+
可以说是资本论的通俗版,因为通俗丢失了一些东西,但也因此更容易让人看懂,好书
|
66 |
+
这书真是无趣到了极点……
|
67 |
+
英文书还是读原版。与保尔森、伯南克的书互为印证
|
68 |
+
看了等于没看,还浪费时间的书
|
69 |
+
虽然还是在胡说,但程度上似乎比前两部少了一些。
|
70 |
+
大部头的小说
|
71 |
+
这类充满灰暗内容、负能量的小说都应该扔垃圾堆里烧了
|
72 |
+
读了两遍都没读懂。。。。
|
73 |
+
所谓的闲笔或者自谓的这种也可称之为的“叙诡”于故事本身是毫无作用的,炫技有时不到点上确���很难堪
|
74 |
+
垃圾中的战斗机
|
75 |
+
白送我都不稀罕。
|
76 |
+
其实观点还阔以,非得他妈不说正常人话,拽个鸡巴
|
77 |
+
深入浅出,结合史实、材料的学术文,四星起
|
78 |
+
看的是译言网的中译版本,极大满足了我的猎奇心理啊。从外国人的眼光看中国社会果然不一样
|
79 |
+
有太多的思绪而至内心更加空洞,最后尼采透露出的一部分自我让读者和他自己都得到了救赎,有人说,人是为了幸福而活,人永远需要爱,也需要付出爱,而哲学似乎不会给予该怎么生活的答案,就如尼采所诉,这条路只能自己寻找。无论对自我的局限走到哪里,只要做出选择,就去爱这选择,虽然不会被治愈,但至少被慰籍。
|
80 |
+
前半部还比较好,后半部就有点不紧凑。
|
81 |
+
虽然写的比较浅,好歹让人对大中东有个基本概念,另外图文并茂的看着也舒服
|
82 |
+
读了一半,还行吧,没有当年《奇迹的黄昏》带给我的震撼大。作者的叙述总有一点悲天悯人的调调,本质上来说,我是一个乐观的人,所以看着有点憋屈。当然作者的观点,给我很多启发,但是论述的逻辑性尚缺严密。
|
83 |
+
首先看的是伊伯特的纪录片“人生如戏”,后来一直想找来此书一读,奈何过于火爆,至今日才在图书馆预约到。多每部电影的影评,提及的电影也大都看过,但共鸣点委实不一样。更觉哈姆雷特不止一个,和经历观念太相关了。许多更伟大的电影未提及,也算遗憾。
|
84 |
+
过程像行尸走肉,结局像迷雾
|
85 |
+
言简意赅,处处都是精华
|
86 |
+
太无聊的阴谋论。
|
87 |
+
一口气读完 很喜欢。尤其是这本书里关维兴的插画 很喜欢。
|
88 |
+
补记/看不下去啊~
|
89 |
+
纸上谈兵的截图指南,推荐。
|
90 |
+
粗读。都是观点,没有数据和事例。
|
91 |
+
2017-15 未雨绸缪四个字说得那么复杂
|
92 |
+
好畅快
|
93 |
+
入门之作
|
94 |
+
天马行空,欣赏不来
|
95 |
+
我想了一下这本书被大力推崇,乃至"改变了对明朝的固有看法",然后就是推什么天子守国门之类的.在这类读者看来,气节之类是最重要的(说起来明朝最讲气节的几个反而不是真的气节),至于锦衣卫,东西厂之类则被视作不存在.晚明的颓丧糜烂风气也被视为一种明朝活力的象征.无论如何吹,明朝绝对不是几个长期王朝里最强的,甚至宋也算不了.唐朝的强在于儒教在此一朝只是多元中的一部分(中晚唐儒教重起,儒生又无法控制藩镇武人,则唐归于毁灭)
|
96 |
+
这书我看了一年还没看完,断断续续,弃了看,看了弃,实在是无法一口气读下去。。故事是个好故事,可是文笔也太啰嗦了。。。
|
97 |
+
非常令人失望
|
98 |
+
死亡让一切平庸显得伟大;这叫做尸体的浮肿。
|
99 |
+
没有耐心的就不要读了,完全没有八卦好么
|
100 |
+
飞行读物。翻译的不错很细腻。也从书里看见自己虽对前路迷茫也不会沉浸在过去了。
|
101 |
+
越读越涩,涩到煎熬
|
102 |
+
佩服作者的观察力和毅力,但是一点可读性都没有,干巴巴的。
|
103 |
+
此书的靠谱程度和好莱坞每年出产的烂片数量形成了奇妙的对比。
|
104 |
+
案例值得深思
|
105 |
+
看到这样的文我就觉得网络文学的质量果然还是正常的
|
106 |
+
写史一绝,没有矫揉造作,没有铁血刚硬,可以深深地体味对历史的敬畏和温情,文字辛辣,却没有语言的偏激,这其中包含着文人墨客对中国政治自古的追索,让人心酸,让人欣慰,值得铭想。
|
107 |
+
很容易看下去,简单但是有深度,与历史结合的写法非常有趣
|
108 |
+
比较全
|
109 |
+
想法不错,一上来还挺惊艳的,越看越不行,七拼八凑现学现卖的感觉,且文风恶俗,从头到尾毫无节制地贫嘴
|
110 |
+
爽。
|
111 |
+
……读完整个人魂灵都处在被勾走的状态QAQ 2001库神影像已先入为主了 想给2001一万颗星TvT(“God, it's full of stars!!!!!”(。 神一样的思维设定基础 觉得2010可读性最强(……虽然被电影作用了从土星转成了木星=。= 2061有点水 3001就真的……唉说不出话了QAQ “你我所拥有的自由,只怕比想像中还要少。”(btw 这真的是目前最棒的生日礼物QAQ
|
112 |
+
真的很一般啊
|
113 |
+
矫情逼
|
114 |
+
毁城记
|
115 |
+
零散的不成体系;自以为是的抒情 鸟人/法国中尉的女人/一个男人与一个女人/骗术大全/美国往事/罗拉快跑
|
116 |
+
难以做出评价,很多不认同,so
|
117 |
+
流水账一般
|
118 |
+
作者对历史很有了解,小说夹杂历史,难得的好作品
|
119 |
+
同学的书 凑合看完
|
120 |
+
女主的性格倒不让人讨厌,只是看的时候心里总有个声音在说:“哪有这样的人。”很是心烦。东方非和凤一郎倒是很有意思,你们在一起吧。
|
121 |
+
故事还可以,但感觉情节不够丰满,很多东西,都没清楚交代,有些转折有点生硬。文字感觉也比较一般。不是特别特别特别多时间的人不推荐阅读。
|
122 |
+
真真切切的流水账,青春回忆+黑道+高干+富二代+绝世美颜(男主姐,法拉利家族二儿子拜倒��下)+娱乐圈+天上人间+... 狗血再来得猛烈些吧!这真的是写《匆匆那年》的九夜茴么?就这还写了将近四十万字上下两册?作者神了, 出了这么多本书,还能来这么一部跟处女作网文一样的作品,真的不是把当年的出道作品拿出来充数了么?真的不是为了交差应付凑了一本?真的不是枪手在写大学开始的故事么?给跪了。留着的两星给幼年部分。一些对话和情节能透出纸张让人尴尬。
|
123 |
+
我不明白他们为什么这样迷信野史
|
124 |
+
唐七的小说除了那看似华丽的文字外,还有什么?读了唐七的华胥引和三生三世之后,我忽然觉得郭四娘的小说真是太特么好看了。唐七的小说和他的笔名一样,充满了装逼的气息。角色塑造也几乎和华胥引完全一样,两部小说的男主都是能力强悍的高冷系——装逼者的最爱,而女主则都是中二病带点转了几世的小忧伤......呵呵,您能稍微突破下自己,多少有点创意吗?
|
125 |
+
感觉很无聊~但是刚推出是看豆瓣评到8.多就下手了
|
126 |
+
希望我看的不是删节版的。
|
127 |
+
已经不能读中文小说了,太难看了,尤其是里面对女性角色的刻画,真是丢人。
|
128 |
+
「即便你是個僧侶,像我哥哥那樣,盛怒或屠殺也會在某天找上門來。因為沒有社會的存在你也無法以僧侶的身份生存。你摒棄社會種種,但要達成這一目標必須先成為它的一分子,從中學習並做出決斷。這是歸隱的悖論。」 / 四五年前第一次接觸到這本書的時候,是台灣譯者的翻譯,書名叫《菩薩凝望的島嶼》。前段時間朋友在力推這本書我以為是翁達傑的新書,之前並沒有注意到英文名,後來一查,才知道是同一本。但無論是哪個翻譯,都覺得蠻好的。幾年後再讀是另一種完全不同的感覺。我不得不喜歡那個「買」了她哥哥的名字 ——「安尼爾」的女法醫人類學專家。
|
129 |
+
越写越恶心 妈呀 这样的女主 太长的文 必然问题颇多
|
130 |
+
经常看到默默流泪……
|
131 |
+
2015年读完的,还行吧!
|
132 |
+
很喜欢,再多读几遍
|
133 |
+
蒂莫西·加顿艾什的《档案》《事实即颠覆》都很差!
|
134 |
+
这种意识流的看点在哪?
|
135 |
+
感觉自己被祖伊训的狗血喷头 但结尾有点马虎啊 不能拿大哥作幌子啊
|
136 |
+
这算哪门子的纯文学
|
137 |
+
行文过于流水帐,不推荐阅读
|
138 |
+
其实除了一点历史之外看不出男女主有什么时代差异
|
139 |
+
原来以为还可以,看完发现相当无聊。
|
140 |
+
与其说是创业的国度,不如说是创新的国度,可是一个国度的创新背后往往是它一路走来,甚至永远摆脱不掉的民族血泪。所谓多难兴邦,或许也有这层意味。
|
141 |
+
一百个赞
|
142 |
+
奇遇的重演
|
143 |
+
创新是这个世界保持增长的根本原因,创新需要知识技术及载体(人类和人工智能)。我们这个世界与几万年前人类祖先智人生活的世界的不同之处在于物理秩序(信息)的不同,现在处于信息爆炸的时代,不同的知识技术储存在不同的个体、组织中,要想持续不断地创新就要扩大我们的社会关系网络,以获得更多的知识技术和计算能力,所以,链接越来越重要。
|
144 |
+
宋朝部分写的太好了
|
145 |
+
印象很深的就是罗斯福想要帮助英国抗衡德国,每一步都很艰难,要说服国会说服人民,反观日本德国,希特勒,军部说一不二,制度的差距如此大。
|
146 |
+
好脑残的,诧异能印出来
|
147 |
+
专业影评写作指南
|
148 |
+
屁话多!!就是说这个好,这个重要。一点都不深入,中信居然给出版了,噱头大啊!!第三次工业革命并不仅仅围绕能源,也不仅仅是核电风电光伏!!
|
149 |
+
开篇便是甩掉你的坏旅伴,但后面就是一个complain的集合体,最后再凸显一下我能忍受着一切,牛逼吧。看来看去,整本书写的最好的部分是写给自己旅伴的孩子的信。也许真人是个有趣的人,但落实到文字上就算了。
|
150 |
+
对俺来说有点专业,但是确实对了解黑色电影很有用
|
151 |
+
俗
|
152 |
+
儒家文明PK海洋强盗文化,尘归尘土归土,全球化的今天,谁都不可能幸免。中国人真的有必要一直勤劳勇敢朴素下去吗?不劳动真的那么可耻吗?今天的中国会有不同于100年前的命运选择吗?一起拭目以待吧。。。话说,希腊政府真是够可耻的!!
|
153 |
+
把先贤们都说过的事情拼凑一下,炒冷饭。有点无聊,最多2星……
|
154 |
+
被我发现了刊印错误……比较实用的一本书,但是写影评最重要的部分是对电影知识的了解和笔头功夫,所以练习更重要。
|
155 |
+
目的性太强
|
156 |
+
自己看过的编剧类书籍中,内容最完整的一本。它涉及的面更广,不仅是一本剧本写作指南,也是一个(好莱坞)编剧从业指南。
|
157 |
+
故事的主题是创新
|
158 |
+
玛丽苏,拉分数的我。这书名无语了
|
159 |
+
新晋男神。
|
160 |
+
我记得我有两本来着
|
161 |
+
很不错
|
162 |
+
看到毕业礼那里��实挺心酸的,小时候我也像弗兰西那样的爱爸爸。
|
163 |
+
似懂非懂。
|
164 |
+
这是我读的第一本钱先生的书,当初的那种醍醐灌顶之感如今还清晰可感,不管怎样,我算是读到了一些东西。
|
165 |
+
好失望~
|
166 |
+
看完觉得很恶心,不能认同作者的观点,怎么豆瓣评价这么高?
|
167 |
+
凉生,我们可不可以不这么作……
|
168 |
+
烂书一本。
|
169 |
+
很美丽的地方,喜欢作者摘的那些水果
|
170 |
+
不知道该说是资本主义的虚伪,还是人性的虚伪。
|
171 |
+
语言圆润 论点偏大众
|
172 |
+
动机真的是青春期中二症典型例子...
|
173 |
+
图表上的字太大,影响了图表的直观性。
|
174 |
+
描写知识分子们在新中国成立后第一次政治运动中遭遇和表现的小说,然而却仅仅停留在历史场景的复述和控诉上,缺乏深层次的批判和思考;写法手法上也没有太多的技巧可言。看完卡夫卡和昆德拉的作品,再来读这本《洗澡》,实在有味同嚼蜡的感觉。
|
175 |
+
知我者,谓我心忧;不知我者,谓我何求。 历史的误会。
|
176 |
+
原来《庶女有毒》换了个名字出版。非常长的宅斗文,女主金手指大开,初看非常爽,再看经不起推敲。听说是抄袭了一百多本书,可怕。仔细看的话用词和标点符号都非常不到位,很幼稚。李长乐的美若天仙和智商真是不成正比,蒋家五个最优秀的儿子全都死了,算计人和吃白菜似的。
|
177 |
+
你丫知道什么是纪事本末吗
|
178 |
+
街上那点事儿。翻译不错,可以一看。
|
179 |
+
好长好无聊,可怜的端午假期
|
180 |
+
依然如此吸引人,但是有些郁闷,感觉比呐喊更阴暗些。不知为何极喜欢魏连殳。
|
181 |
+
差。
|
182 |
+
最后直接叫朋友说了结局,没了闲情看下去。
|
183 |
+
一出闹剧。如果说秋菊是轴,那么李雪莲就是作。小说里的人名:王公道 董宪法 荀正义 史为民 储清廉,真是受不了这种赤裸裸的反讽。先不管小说写的怎么样,反正读起来感觉很不好就是了。。囧rz
|
184 |
+
终于搞清来龙去脉
|
185 |
+
高二时候读的,记忆深刻
|
186 |
+
非常差。各种不靠谱。没有什么当地的风土人情,只有作者貌似站的很高的感叹。自我感觉太良好了吧
|
187 |
+
电影挺喜欢的,故事也挺喜欢的,可是这本书的翻译啊,真的好敷衍
|
188 |
+
酣畅淋漓
|
189 |
+
我不懂评分为什么这么高,太扯了
|
190 |
+
就冲这出版社的社品。。。尼玛一星都配不上!!!
|
191 |
+
浓浓的直男癌口气…好恶心…
|
192 |
+
忘不掉过去,就很难得到现在和未来的幸福,那种抱着过去和现在死磕的人最讨厌了!
|
193 |
+
1.作者何必不断强调自己是乡下人?你若看得起自己 没人看不起你 2.我还真不知道星巴克里除了美式之外两杯咖啡才收37元钱 3.这感情真是你麻痹来的莫名其妙 4.这个和何以完全不是一个档次的 何以比这个好看一千万倍不止。
|
194 |
+
王朔也就一街头低端草泥马
|
195 |
+
完全get不到点的男女关系
|
196 |
+
倾向性太明显了吧……
|
197 |
+
曾经如此,此后不再。要记得。
|
198 |
+
烂!!
|
199 |
+
白左垃圾
|
200 |
+
废话太多 看到第三页就会忘了第一页说了什么 可能是我悟性太差吧
|
201 |
+
对侯导的作品又多了一些了解。
|
train/datasets/test_data/book_review/train.tsv
ADDED
@@ -0,0 +1,501 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
label text_a
|
2 |
+
1 冷酷无情地细致描绘了失明症蔓延后的人间地狱,几乎不用形容词,没有任何形容词的分量足以承载书中的情境。只是讽刺、暗喻、描述,不煽动,不呼吁,绝望压抑到死。ps题外话,要是在中国,编辑首先得让你分分段,标清说话主体再来交稿。另,发现灾难题材里的“幸存者”通常都是女性。
|
3 |
+
0 这么美的书名,这么sb的故事
|
4 |
+
1 年轻时荒唐狂热过,所以才有老来的沉淀和思考。
|
5 |
+
1 很奇特的一本书。。
|
6 |
+
0 不喜欢。
|
7 |
+
0 搞毛,女主你到底爱谁。
|
8 |
+
1 挺有意思
|
9 |
+
0 花一天,看完了,就是小白文,只不过加了个神的外衣,浪费时间,浪费时间!
|
10 |
+
0 拿腔拿调了两百页,最后漏了珍珠翡翠白玉汤的底子。
|
11 |
+
0 作为游记,过于流水账,革命之前年轻时候的格瓦拉的普通人生活
|
12 |
+
1 写的通俗易懂,可以对以色列有更深的了解
|
13 |
+
1 看这种淡淡的文字,冷静的叙述,最舒服不过,让人安静。和沈从文的味道很像
|
14 |
+
0 借朋友书读的,她推荐时说本来很精彩的故事被删减的没了趣味。我读也就这个感觉,但故事情节仍让我感觉得到爱情的诡异之处
|
15 |
+
1 笔记整理。理论高于实践的同时指导实践。
|
16 |
+
1 把人的阴暗面描绘得真好!又发现了一个很喜欢的作家!
|
17 |
+
0 看不下去+1,很早开了个头就没看下去,听说要拍电视剧,还是比较喜欢的演员演,就又开始看,这尼玛,依旧看得很痛苦,还是看不下去,我自认为很包容,很久没看到一篇像这样对我毫无吸引力的小说了,强迫症也不能让我补完,这也能改成电视剧????刘诗诗又要演这种类型的???
|
18 |
+
1 泪
|
19 |
+
1 买来的时候还以为是纯粹的关于电影的书,结果读了才发现是融合了人类学和社会学来解读电影,所以也略微晦涩一些;已经习惯了电影的一切,读过此书之后,才发现电影的一切并不是理所应得,如果我们生活与从放映机到电影术到电影艺术的时代,一定会更加赞叹和感激电影的魅力。
|
20 |
+
1 很有用啊!簡單清楚
|
21 |
+
1 我的入门扫盲教材,图文并茂,最初正是被本书的编写体例所吸引
|
22 |
+
1 耗时将近一个世纪,无数人死亡的大博弈最终在1907年通过外交方式解决。
|
23 |
+
0 实在不好看啊!小三和一个长得不错又有钱的衣冠禽兽的故事而已~
|
24 |
+
1 堡垒还是容易从内部攻克
|
25 |
+
1 无政府主义的乌托邦幻想者
|
26 |
+
0 怎么会有这样的书。
|
27 |
+
0 惊呆了,我在看微微一笑很倾城吧,设定基本一模一样啊,弃了弃了……
|
28 |
+
0 郝秀玉棒
|
29 |
+
1 很好看
|
30 |
+
0 百度百科
|
31 |
+
1 后几章非常难,多方博弈
|
32 |
+
1 戴爷。
|
33 |
+
0 简直是开玩笑。
|
34 |
+
1 通过这本书认识了不一样的伍迪·艾伦
|
35 |
+
1 好一个塞林格~
|
36 |
+
1 有意思的书,从另一面看电影的产生和黑泽明本人。以前一直疑惑为何芥川龙之介的竹林中电影版用的是另一个故事的名字,罗生门。原来背后有偶然因素,哈哈
|
37 |
+
1 金作屋,玉为笼,车如流水马如龙。刘郎已恨蓬山远,况隔蓬山几万重。
|
38 |
+
0 最后落入俗套...
|
39 |
+
0 这小学生作文一般的流水账到底怎么得到7.7的评分的?!
|
40 |
+
1 笑死我了,我爱你,普拉切特和克鲁利
|
41 |
+
0 看着玩儿还行
|
42 |
+
1 拿到手,一口气读完,好吧,早期的毛尖刚经历过暖洋洋的票友时代,对电影的热忱可谓真挚,对于电影迷来说,不啻为大餐。而后期的毛尖如今完全沦落为插科打诨,混圈子,互相吹捧的毛博士,去年的一本《乱来》实在令人嫌恶,再无半点好感。
|
43 |
+
0 皎皎也就是这样了好吗,文风永远都是掉书袋系列,男女之间永远都是走人淡如菊风,矫情死星人。。。
|
44 |
+
1 今敏导演真是一个可爱的人,制作电影那种事无巨细恨不得自己做完所有事情的状态真难得,缅怀。
|
45 |
+
1 伟大的作品。
|
46 |
+
1 半年才看完,不错
|
47 |
+
1 对每个人而言,真正的职责只有一个:找到自我。然后在心中坚守其一生,全心全意,永不停息。所有其它的路都是不完整的,是人的逃避方式,是对大众理想的懦弱回归,是随波逐流,是对内心的恐惧。
|
48 |
+
0 我觉得很难看··矫情地要命,初初看《守望灯塔》时的感受没了··
|
49 |
+
1 寒假前在新图书馆的教室里看完的。很怀念。
|
50 |
+
0 也看不下去的一部书,女主的那种刻意 孤勇,过于YY。
|
51 |
+
0 一点都不喜欢,并没能提供多少实质性知识。这种科普类电影书还是去看《认识电影》更靠谱。
|
52 |
+
0 本书完全可压缩为简短的十页有关历史简介
|
53 |
+
0 这三本确定是同一个人写的???构思,人物刻画,文笔差了不是一点点,总结两个字:难看
|
54 |
+
1 四平八稳的影评们
|
55 |
+
1 内容不错,很实用。就是性价比不高,需要做这么大一本表格吗……
|
56 |
+
0 一般!新闻流水账,入门级读物,罕有引发,缺少高论。看多了这些西方史学畅销���,觉得其实跟国内的历史故事书并无两般。
|
57 |
+
1 《大而不倒》是一群因为次债危机焦头烂额的银行家,政府;《大空头》是一群因为次债危机大获全胜的怪人,一个事物,两个极端,世界太疯狂。
|
58 |
+
1 潘帕,你让我说什么好呢。
|
59 |
+
0 YY的过头了,女猪老强调她是乡下来的,但某些言行一点不像啊。为了残疾美男我勉强看了一半...
|
60 |
+
0 挺一般的一本书。
|
61 |
+
1 以史为鉴,可以明得失。国人学史必读。
|
62 |
+
0 无趣
|
63 |
+
0 盛名之下,難副其實。失望。
|
64 |
+
0 为了虐而虐,情节好扯,我竟然看完了。
|
65 |
+
0 情节牵强。
|
66 |
+
1 前几天豆瓣推送,脑袋一热就下了单。68块大洋,希望书有所值
|
67 |
+
1 所谓天才,的确是天赋与勤奋的集合,今敏再次佐证这个不变的真理;极度缺乏资金、人事关系危机重重的四面楚歌,今敏的描述依然充满趣味,各种形象的比喻让人忍俊不禁,也让我们找到些许他汪洋肆意的想象力之现实根源;看其创作灵感的涌现和之后修改,都非常受益,对其作品的认识也更深一层,特别喜欢第一章《遥远千年的呼唤》。
|
68 |
+
1 入门 很有意思
|
69 |
+
0 20170723。书评都在说人性,可是为何没有人发现汉尼的康复来源于黑暗魔法。为何要救助汉尼呢,天降神兵吗,好生不解。
|
70 |
+
0 两星。穿越到三国,男主是曹操,生下了曹冲。这样好吗?
|
71 |
+
0 阿治实在不该去碰鲁迅。
|
72 |
+
1 我喜欢这个有理想的文化青年
|
73 |
+
1 “与无可救药之地,疗人寂寞,是菩萨行”。
|
74 |
+
1 终于读完了,非常震撼,美德的毁灭与世俗铺天盖地无可避免的堕落对比强烈,对现世也有非常强的反思意义。显克维奇是伟大作家,写你往何处去写尽基督徒美好,写十字军骑士写尽滥用信仰的可怕。
|
75 |
+
0 看过的最为垃圾的一本旅行志 无病呻吟幼稚可笑毫无亮点 一个不入流的背包客写的看似深刻实则空洞的所谓文字
|
76 |
+
0 给一星是因为没有负分可以打!剧情雷,文笔差,完全无可赞之处,不明为何那么垃圾的书能出版!同类型的故事很多,这是我看过的最烂的一部。
|
77 |
+
0 从别的书摸过来的,微博上看几位作者应该关系不错。政治部分还不错读,言情部分就有点水平线下了。尤其是女主角神烦成这样也是好久没遇到过了。
|
78 |
+
0 有讨厌的角,只快进挑主角的戏份
|
79 |
+
0 2017的第一本书,赶在开年第一天的末尾看完了,书是从学校图书馆借的。 陈北尧和慕善的爱情,血腥又慈悲。张爱玲说,一座城沦陷了,为了成全他们的爱情。 假若现实里发生了类似的事,我大概会逃得越远越好吧。 喜欢庙里的那句话:一曲清溪一曲山,鸟飞鱼跃白云间。
|
80 |
+
1 既畅想明日,更揭示今天。继午夜文库后又推出了幻象文库,爱死了新星出版社!
|
81 |
+
1 死若樱花灿烂
|
82 |
+
0 我已经过了看小言的年纪了 完全没有耐心看下去 而且还是带着神话 奇怪的文章 不值这个8.5分 开头看着还以为是耽美 只想说一句 你在逗我?
|
83 |
+
0 不成立的故事。爱情不过是一种低等的情绪,最多也只是无聊的游戏。一个把爱情当作毕生追求的人实在荒唐愚蠢,居然还能被称作了不起,无法理解。
|
84 |
+
0 终于看完了 通过这本书我发现原来一部作品的情节不是最重要的 字号大小和排版疏密才是决定看得舒不舒服的 这个作者用的字敢再小点吗 而且内容也是比较费脑的 真是让我看得比较累啊
|
85 |
+
0 比较无聊的一本
|
86 |
+
0 忍无可忍,无需再忍。
|
87 |
+
1 历史与你 图文并茂 思考题设计的很好 不过最后几本政治倾向太强了些
|
88 |
+
1 文字图片是真好,包括历史真实和采集经历,值得一读再读。可惜了这个做法,看来汉唐阳光hold不住画册。就算退一步,编辑也不合格,字体就不多说了,连译法体例的前后统一都没做到。为什么我要挑剔呢?一当然是职业病,二就是关注这个题材,希望书能够锦上添花,扩大影响。现在这个样子我都没法买来送人,只好让我爸自己去图书馆借了……
|
89 |
+
0 我一直都在想为什么明明可以写得通俗易懂的话,扬之水总是写得云里雾里,让人昏昏欲睡?今天读这本书忽然明白了,这就是典型的「文笔不够,资料来凑」啊!
|
90 |
+
1 想当年还是你推荐我读的,如今已经成了回忆。
|
91 |
+
1 永生,男变女,设定的时髦值碉堡了。个人觉得这是伍尔夫最好的一本书,不像她别的作品一样晦涩,但风格依然如冰冷的火焰一般。这书是伍尔夫写给同性情人的思念,里面关于性别的心理描写很有意思。
|
92 |
+
1 大三下 紀錄片創作課讀物。手册指南
|
93 |
+
1 因为项目所以特别找了下关于以色列的书。从历史发展的角度相对比较深入了解析了以色列的“创业基因”,读完后对以色列的发展历程,以及军事,经济和文化都可以有一定程度的���解。
|
94 |
+
0 一直都是喜欢她的书的,但是当全部的都是一样的基调,然后又缺乏了一些逻辑性,再喜欢也是会腻的。。这和东宫不一样之处就是那是女的死,这是男的死。。大同小异了。
|
95 |
+
1 烧水时候站着看完的小说,想给六星。俄国的圣母崇拜致使他们小说里的女性是塑造得最丰富的形象。妓女和圣母、贞女和荡妇、贵夫人和情妇的张力造成无以伦比的艺术效果。在男主人公身上无可调和地冲突的复调,在她们身上奇异地融合。丽莎一转身就是Sonia.
|
96 |
+
0 大失水准
|
97 |
+
0 看完我觉得我应该去写小说,骗稿费。
|
98 |
+
1 历史与哲学缠绕交错,历史变成了各种因素与巧合合力之物,可惜对于影像资料历史,知道的还太少。
|
99 |
+
1 抗战期间,南渡;抗战结束,北归;又一次抗战结束,离别。说起来,简单的二十余字,过起来,却是很长的岁月,有煎熬,有无奈,更有太多辛酸。这是系列书的第二本,读起来依然让你觉得荡气回肠。不是文字有多么大的魅力,而是书中写到的人物以及他们的事迹,着实让你的觉得历史怎么可以如此厚重!上学时候,老师总在讲,由于历史原因,这样了那样了。其实,直到现在自己看了一些书,才知道原本很多历史可以改变,只是由于身处那个时代,太多无奈!假如当初国民党不那么腐败,执了政,当了权,中国的今天会不会更好一点?其实,哪里有那么多的如果!台湾的现在,多么的理想吗?其实,更遗憾的可悲的是那些大家,如果没有这样的北归,他们肯定会为我么创造更大的财富!可是,历史过去了。Now, 23:31, September 9th, 2016。
|
100 |
+
0 新加的内容,就那么一丁点。有老版的同学不用买了,新的内容导读说的比作者写的还多,老版的错误与缺失,新版压根没订正。直接照搬
|
101 |
+
0 太文艺。不喜欢。
|
102 |
+
1 花样的年华,华丽的转身
|
103 |
+
0 连仓央嘉措的诗都能弄错,只能说是为了装吧
|
104 |
+
0 把这个作为盖棺之作实在是太残念了
|
105 |
+
1 是枝萌大叔的日常与拍电影心得。文字虽然带有鸡汤味道,但讲的道理有太多共鸣点!
|
106 |
+
0 低于2星。①5个独立小故事。②无聊。
|
107 |
+
0 求放过!
|
108 |
+
0 基本上没看懂,只是温习一下以前就明白的。
|
109 |
+
1 很不错的博弈论入门书,条理很清楚
|
110 |
+
0 太杂散,就像一篇一篇博客,书中太多日本特殊用语,翻译有点中文夹杂日文的感觉,
|
111 |
+
1 很多《电影美学与心理学》已涉及。
|
112 |
+
0 又臭又长的流水账 看完就记住一点 作者是哈佛毕业的。。。
|
113 |
+
0 过于混乱,不知所云。并不反感飘渺梦幻的写法,但那也是建立在工整逻辑的基础上的,《务虚笔记》便是极好的案例。
|
114 |
+
0 "慕名"读了一本“宅斗文”哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈所以就还是开上帝视角的玛丽苏女主嘛!!!!
|
115 |
+
0 哦
|
116 |
+
1 最爱2010
|
117 |
+
0 在b站上看到这里的评分这么高,特意来打分的
|
118 |
+
0 长得叫人读不下去,民国大师八卦谈,除了陈寅恪,对谁好像都有点揶揄。作者肯定特别爱看《水浒》,尽拿来做比喻。有一些事实,但从作者的识见出发不一定得出可信的结论。万卷出版的,错别字和体例问题很多,文字质量相当于盗版书。没事闲的也可看看,聊为一助。
|
119 |
+
1 从古代以前说起~NB!
|
120 |
+
1 好读!最喜欢《拉萨有铃铛》,有几篇前半部分读着感觉流俗了,后边笔锋一转,不觉泪目,就像《蝉的歌》最后几段想到了自己的朋友!不像一个94作家的笔触,推荐!
|
121 |
+
1 《大地之柱》的原名比这狗屁的《圣殿春秋》强太多了。
|
122 |
+
0 我淡比,居然看这种东西!
|
123 |
+
0 冲着那些没品笑话加一颗星吧.
|
124 |
+
1 生为女子。
|
125 |
+
0 没有原因的特别不喜欢 看到和三公子在船上打召玉,实在捏着鼻子也看不下去了……
|
126 |
+
0 书里的男性角色大概都可以去死一死了 不过最后却是女主死了 也挺爽的
|
127 |
+
0 有点扯,而且好长啊,好没劲 呵呵 抄袭
|
128 |
+
0 不会写情
|
129 |
+
1 比较有意思的一个系列
|
130 |
+
0 色彩,年轻的想象力和表现力,以及苍白
|
131 |
+
1 有几篇那么亮眼,好久没看短篇小说了。
|
132 |
+
0 看似有理,实则无用!
|
133 |
+
1 群星闪耀
|
134 |
+
1 看完后明白想做一名好演员那绝对不是一件容易的事情,那是要靠日积月累的排练 练习不断地向自己提问,替换,寻找人物的特点习惯,把自己自然的带入进角色里头。尊重!表演艺术!
|
135 |
+
0 我竟然还看了40多章,没脸见人了!
|
136 |
+
1 名家名著,一遍基本不太懂,已做好啃多遍的准备!
|
137 |
+
1 萨拉马戈的眼泪,是医生妻子在盲人世界里,那双看得见的眼睛。
|
138 |
+
1 由于这两天的际遇 看完这本书心中的痛楚缓缓释放 命运 征兆 迹象。19号晚上去看电影。 草蛇灰线 冰山下沉默的部分 突然握住温热坚定��手 你的拥抱 以及最后的不告而别 仓促收场 目光是尖锐的 为何如今你我不交一言 有一天还会再相逢吗 那时候我会是你期冀的模样吗 “她对接下来的变故浑然不觉,毫无预见,一切也没有征兆。”“一切早已铸就,往昔从未离她而去。” 作为一个花痴 我只服这本书的阐释
|
139 |
+
1 真实到可怕。尤其是各种内心情绪的细腻的描摹~简直是让人怀疑这真的是男人写的么?他怎么那么懂女人的心呢。以及感觉看到了自己以后生活的全部~
|
140 |
+
1 连续五个小时的阅读,大脑像失控的转盘飞速转动,却是机械地转,绞动思维又茫然混沌,理不出清晰思路,最后只能定为一场疯子的臆想。
|
141 |
+
1 金仙观之谜和离合诗之谜依然没有揭开……第四部呢?第四部呢?第四部呢?
|
142 |
+
1 也忘了。
|
143 |
+
1 看见伍迪就忍不住买了。从创作者角度重新看待伍迪的电影,这本书给了人们这样的机会;书中没有多少八卦和伍迪在电影之外的生平记述,但要比那部纪录片更实在。
|
144 |
+
1 近距离观察大师们的思想和生活,体验大师们的人格魅力
|
145 |
+
1 对于我这类经济学白痴逻辑学优秀生来说,不错。
|
146 |
+
0 眼睛都快看瞎了,只想快点结束,,,事实证明浪费时间浪费眼力读一本没有价值的书
|
147 |
+
1 第一次看香港推理,没想到意外的好看。
|
148 |
+
0 我tm再信豆瓣推荐,花钱买这种书,我真得剁手了,气死我了
|
149 |
+
0 首先真的不好看啊,都是第一人称一直在bbbbbbbbbbbbbb,除了结尾稍微能勾起一点情绪波动,其余时间的阅读都是对读者的折磨。其次不管原文怎么样,翻译是不好的。比如作品中很多吃穿用度用词已经进入汉语,译者本地化得不够彻底,显得很生硬。
|
150 |
+
0 政治倾向很重。立场不客观。考据多为野史,可信度质疑。文笔有很多主观色彩。和史记比比简直天上地下
|
151 |
+
0 不要把400千字的时间浪费在这本书上
|
152 |
+
1 大历史里的小人物,却也是不屈不挠得另人敬仰
|
153 |
+
1 简直五体投地
|
154 |
+
1 很多现在看来是理所当然的,其实是经过一代人的牺牲(事业甚至生命)才得来的。
|
155 |
+
1 《一個隱形人的畫像》 very impressive
|
156 |
+
0 《活着》五星,《许三观卖血记》四星半,《兄弟》四星,……《第七天》两星
|
157 |
+
1 六六折开卖
|
158 |
+
0 又出厕纸了?
|
159 |
+
1 喜欢这种絮絮叨叨的聊天 不用在乎现实 自由的对话
|
160 |
+
1 赤贫的孩子成长的故事
|
161 |
+
0 能再烂点吗?
|
162 |
+
1 可以一下午看完的编剧入门书,中文版有关键词汇的英文翻译,很好用
|
163 |
+
0 实在好讨厌这种当年席娟腔的语气写游记,赚点小便宜就很开心,觉得自己聪明伶俐古灵精怪一样。
|
164 |
+
0 很一般的高干文,翻过就忘了
|
165 |
+
1 这真的是一个很棒的故事!
|
166 |
+
1 我是看的电子书,几天就把这四本扫完了,视力硬生生下降了好多,看来以后还是看纸质书适合我。虽说其中有两本写作年代久远,但作者的想象力到今天看来仍不过时。人类从哪儿来,要到哪儿去,希望就象书中写的那样吧。如果说阅读的快感,我还更喜欢刘慈欣的《三体》呢!真的有一个高度的文明,留下一个机器就能改变一个星球上的生物的进化历程?真的能想摧毁就能摧毁一切?就象人类对于蚂蚁,这个文明对于人类也是高高在上。太操心了,还是过一只蚂蚁愉快的生活吧。
|
167 |
+
0 弃文了,书中有乐谱~
|
168 |
+
0 完全地,圈钱
|
169 |
+
1 小津的散文与电影风格一致,简洁、质朴,保留日常生活的原貌,在无意中发现美。信的画面感很强,像分镜剧本。如果那田野里鲜红的彼岸花、秋天城外连绵的麦田、战场上纷飞的白色杏花,都不是侵略中国时看到的,就更好了。翻译是亮点,十分怀疑译者是学中国古典文学的。
|
170 |
+
1 苦难并不是一无是处。
|
171 |
+
0 读了五分之一
|
172 |
+
0 平铺直述,实则波流暗涌,结婚、误会、送回江南、误会、怀孕、误会、受伤、生产、冷淡、怀孕误会、逃离、三年后重逢,北地血性男儿与江南温婉小姐,民国乱世绝恋。
|
173 |
+
1 超级不喜欢过于长和仅仅只是介绍往后会在书的本体中看到的内容,读什么都在读之前被剧透了,还哪里来的新鲜感?!我想看的是内容不是那个长到十几页而又空泛泛的前言!!!!!!感想你就他妈的放到最后不行吗?一开始就从第一章介绍到了最后感谢人员名单你让我看啥!
|
174 |
+
0 張祖畫說 覺得一看東野的書 就好捨不得。突然發現我已經好久好久沒看東野圭吾了。感覺讀「白夜行」已經是上輩子的事。如果沒記錯的話 這本應該是東野大神的出道作品。看完之後也確實感受到了這本處女作的青澀之處。也才剛看完 就已經對錯綜複雜的關係網和人物名記不清了。東野圭吾的底牌永遠不會讓你失望。最精彩的不是殺人手法 而是殺人動機。可惜我在看書之前就已經知道殺人動機是甚麼了... 因為自慰被老師看見而殺老師... 唉 讓我說甚麼好...
|
175 |
+
1 没想象中好。不过家书能写成如此风采已是难得。颇有知识分子的清高劲,话大多俏皮,但某些针对“未开蒙”民众的话读来膈得慌。 不从土地中来的学人大抵都是这样,文字厉害精彩,思想也精细,但偏偏少了让人亲近的乡土气。不如邓晓芒。
|
176 |
+
1 太好看。书评在日记。
|
177 |
+
1 潜龙在渊
|
178 |
+
1 因为有周振鹤老师这样的学者,难得地觉得我蛋还是挺不错的
|
179 |
+
0 好吧,那个电视剧很恶心,我居然看完了,这书写的更次了,我竟然也翻完了
|
180 |
+
0 2.8分
|
181 |
+
1 “对于某种优越的身份依依不舍,其实就成了不平等的捍卫者。”
|
182 |
+
0 望天,不是一般得烂好伐,作者已经YY到一定境界= =
|
183 |
+
0 我实在是写作课看得很痛苦想看看中文版,但这个翻译真是过分到谷歌翻译都比它精确翔实了……译者只要不会翻译的,就会说“XX来着”,很怀疑怎么找到这么个人的。
|
184 |
+
0 只有我觉得这本说写的很差吗????
|
185 |
+
1 好美的小说,要去看看电影
|
186 |
+
1 影评人辞典
|
187 |
+
1 不能永远憎恨 但绝对不能忘记
|
188 |
+
1 就是我想要的思路,当然知识层面和结构要更新,但打通各学科是必然的
|
189 |
+
0 非常狗血的一部书,但是坚持看完了,真的狗血到不行,好么?竟然还有第二部
|
190 |
+
1 很难评价的一本书,看了很久,挺难读,但最后有种终于释放的感觉,心理治疗真是对心理咨询师和患者的双重折磨。
|
191 |
+
0 我曰尼玛,终于读完了!!
|
192 |
+
0 left2,安东尼2.0。当然咯,一定会有粉丝买账的啦。
|
193 |
+
0 就是几篇论文的合集,有独特观点,就是没什么太大的说服力!
|
194 |
+
0 我的大部分半途而废的书是因为读的艰涩,觉得智商有限。这本书半途而废是真心觉得无聊。爱情,不是给你几个青春荷尔蒙时期的故事就能讲清的,看过霍乱时期的爱情再看这个实在是入不了眼。
|
195 |
+
1 作者多年关注小津的研究,积累了丰富翔实的史料,写作本书仍然没有信手拈来之感,而是亦步亦趋。尝试了用各种角度对小津进行观察,在表达观点时仍然相当节制,令人敬佩。但编辑和翻译方面似乎略有问题。
|
196 |
+
1 从安乙家拿来的书。慢慢地看了一年。怀念那一些行走的日子。和死过去的时间。
|
197 |
+
0 烂。即使是村上君,我也不得不说
|
198 |
+
0 不好看!一点都没意思,到最后是几分钟跳着看完的,一点都没有看的欲望
|
199 |
+
1 断断续续的读完了这本书。它太好读了,任何时候打开都会很快陷进去。可它也太可怕,它对于人物心理的描写是真实到残酷的,不需要过渡和转换,你就能进入到人物的内心。平淡的故事下一个女人对于家庭的所有眷恋,和亲情复杂的纹理你都能摸透。很佩服作者的功力。
|
200 |
+
0 慕残天雷
|
201 |
+
1 永劫回归。你喜爱这个想法或憎恶它?以你喜爱它的方式来生活吧。……毫无疑问地成为人生最爱的书之一。
|
202 |
+
1 全彩印刷很赞, 很好地表现了书中的电影画面. 每一章都是一位电影摄影师的讲座, 介绍示例中的问题和解决方法非常实际. 当然, 这也意味着, 有一定影视摄制/道具灯光等等相关领域基础知识的人阅读此书可能会更有收获.
|
203 |
+
0 “从事写文章这一作业,首先要确认自己同周遭事物之间的距离,所 需要的不是感性,而是尺度。”(《心情愉悦有何不好》1936年)
|
204 |
+
1 无神论者的坟墓
|
205 |
+
0 要说实话么?真的要说实话么?会不会被为数众多的三体粉撕成碎片?首先作为一个文科生对科幻方面没有吐槽资格这个自知之明我是有的。各类科普式的剧情展开很开眼界?特别是最后曝露三体文明真相那个地方,但是容我吐槽一句,好歹是个小说不是科普类书籍,一下子情节量都在那里爆发了感觉好2333啊。就像以前有人吐槽FFXIII一样——世界观是如此的宏大,你他妈在游戏里体现出来啊!世界观资料全放文字资料里你做个毛线游戏!←这种感觉。然后小说方面剧情衔接什么的各种硬伤。最最最让我想吐槽的是那“人类文明是如此无药可救→这种烂文明毁掉吧”这种说不清是圣母还是救世主还是中二病的思想,在小说中用来做人物的动机哈哈哈,真是看着各种神棍啊。后记里还提到道德!大大别这样!你写自然科学科普吧!不要染指人文!简直就是槽点批量生产机!
|
206 |
+
0 。。。看了一半。受不了文笔太渣= =。完全不会描写人物对话不会好好写记叙文的作者= =。。嘛总之就是女主不知道为什么就傍上了一个钻石王老五而且那个钻石王老五在没有和女生做出什么事情的时候就喜欢上了她= =。。两星半。。。
|
207 |
+
0 这书可读性极差,居然有这么高的评价,真心给跪了,豆瓣不可信
|
208 |
+
1 ���人
|
209 |
+
1 力荐!
|
210 |
+
0 难看到爆,有这时间还不如再看一遍全球通史!
|
211 |
+
0 哦,我的上帝,我发誓,这碗鸡汤的翻译实在是糟糕透了!
|
212 |
+
1 宏观决策,只看了前半部分
|
213 |
+
1 两小时读完 理性务实
|
214 |
+
1 好多看不懂。
|
215 |
+
0 无法代人,没有情怀。
|
216 |
+
0 我实在欣赏太宰治老师不能,迅哥也能被你写成如此矫情姿态。
|
217 |
+
1 按图索骥,找了好几部没看过的电影看了,确实不错。不懂电影学术,但作者是真心爱电影的人。
|
218 |
+
0 比较适合我现在的心情。这本书已经买了快两年,现在拿起来继续读可谓是天时。戈列格里斯做了他内心不安时常做的事:打开一本书。在重新拿起来三天后,读到接近二分之一,读不下去了。反复的咀嚼后,对男主最后的人生选择已经没有兴趣,因为,可以把具有时代意义的事情叙述得如此无趣,也可以合上书了。
|
219 |
+
1 (P)被猪油糊了眼的中国影民万岁,这故事完全没意思
|
220 |
+
0 不太适合我,这位的电影也不适合我。仅此而已
|
221 |
+
0 不知道为什么评价高
|
222 |
+
0 好闷!看不下去,读着就睡着了!
|
223 |
+
0 很糟糕的一本书,占主要篇幅的都是对实验的描述,但是对结论的深层分析(心理、经济等),还有现实生活中例子的引申的却一笔带过了。盛名之下,其实难副。这本书太肤浅了。稍微让我感动一点的例子是社会规范和经济规范的碰撞。“一旦社会规范和市场规范发生碰撞,社会规范就会退出。换言之,社会规范很难重建。”这让我想到了文革,现在确实很难再重建社会规范了。但是我始终相信每个中国人都流的有儒家的血脉。虽然儒家式微,但是星星之火可以燎原。就像日本二战后道德体系崩溃,全民滑入拜金主义思潮。但是等到经济恢复后,传统的一些道德规范和民族性格又跟新社会形式相结合,慢慢复活。这是民族文化的根。
|
224 |
+
0 需要认真地怀疑豆瓣有水军了。
|
225 |
+
0 为什么评分如此之高,让我好生失望,一直吊着我的口味,这种感觉很讨厌,最后也没什么特别出彩的地方,有种强行文艺的感觉。
|
226 |
+
0 爱得莫名其妙???喵喵喵???有点做作
|
227 |
+
0 還行吧,足夠為自己的不環保行為去找借口。
|
228 |
+
0 矫情。但还是说一句。相恋是两个人的天长地久,相思是一个人的地老天荒。
|
229 |
+
1 相当有趣!
|
230 |
+
0 我曾一度看完序之后以为这位作者品位真好,但是读完两章后发现历史还是历史,作者写得真的很烂,读起来很难受。有种砸了BBC招牌的感觉,但是BBC也不是每一部记录片都是高分。
|
231 |
+
0 不好看。情节真的太狗血了。
|
232 |
+
0 鸡汤
|
233 |
+
0 淡老师说:这本《乱时候,穷时候》描写的中国平民的生活,可以总结成:相互坑,使劲生,窝里斗,欺负人,恶狠狠,特兴奋。那是真有精神头儿。 ——就是这样。
|
234 |
+
0 全篇通读了一遍,这么多打五分的都看过书么,完全就是过誉。这年头还有书托了。
|
235 |
+
1 虽然我没做过党的书记,但是为啥这么感同身受捏?或许像瞿秋白一样,我一直向往与熟悉的,也是一种文人式的生活。虽然“一为文人,便不足观”,但文人还是做文人擅长的事情吧,不要勉强自己去开会写报告假装历史开创者了。瞿秋白的经历真是历史的误会,各种机缘巧合,一个想去北大中文系好好读书的文艺青年就这么走上了革命的道路。被架在政治的高位上下不来,那种感觉真不好受。临死之前,遗书写个差不多了,瞿秋白做的最后两件事是:推荐了几本值得看的书,感慨了一句“中国的豆腐是顶好吃的,世界第一”。
|
236 |
+
1 从新的角度看反垄断的出发点和结果
|
237 |
+
0 两星不能再多了。都说现在游记水平参差不齐,这本就是就是拖后腿的。两星给前半部分日料,后面的流水账看完有种被骗的感觉。看了短评发现是作者和熟人的自娱自乐,仅仅一百多人刷出的8.2分真心指导性不大。现在出书的门槛都这么低了么。
|
238 |
+
1 虽然是英国作家,但是是讲希腊的故事。现在对希腊产生了强烈的兴趣。 书还是很好看的,尤其是看完了天使望故乡这本没有太多drama的所谓小说之后再看这一本。然而天使的作者沃尔夫明显感觉比希思罗普的文笔要好。后者在岛里讲故事的能力不是很强,故事节奏不是很好,不知道是不是中译本有删节的缘故。 但是故事还是很吸引人的,花了一天半读完,除了韩寒的小说和期末的考试突击数学,很少有突击一两天读完一本书的经历。 总的来说,瑕不掩瑜吧,是一本感人至深,让人产生时间的洪流和人生多艰感慨的好书。
|
239 |
+
0 看不下去,一个简单的观点不断重复。 分明一段话就能解释清楚的东西,写两页。
|
240 |
+
1 山东--临沂--蒙阴路上的阅读
|
241 |
+
0 这难道不是盗墓笔记吗!!!
|
242 |
+
1 的确翻译的不怎么样。
|
243 |
+
1 好书推荐
|
244 |
+
0 好像比上一本读来更通顺,没啥意思有点浪费时间。作者好像特别擅长景物描写,能想象得见非常美。
|
245 |
+
1 当得起"Masterpieces"这个书名。
|
246 |
+
1 很幽默风趣的一本书,很喜欢
|
247 |
+
1 翻译得不错,但是原作的前两章写得实在枯燥,坚持下来的人才能读到最后。
|
248 |
+
1 塞林格~~~~~太心水了~~~~~
|
249 |
+
1 几度热泪盈眶= = 梦想家的时代
|
250 |
+
1 谁写的内容简介…真见鬼 叹
|
251 |
+
0 很大程度上是翻译问题。
|
252 |
+
1 大胡子的最后一本书,一个星期读完,这个美国老人的朝圣之旅令人赞叹又汗颜
|
253 |
+
1 不存在既存在~可我仍不明白其中的真意~看来需要多读几遍~
|
254 |
+
0 只能给两星,南天门打了仗回来就开始罗里吧嗦的讲,除了战争真的没法看,啰嗦而且立意不明,如果是想表示反战我的确看不懂整篇罗里吧嗦的说些什么鬼东西。感觉能删一半
|
255 |
+
0 最后的小说还挺有爱的
|
256 |
+
0 天雷!
|
257 |
+
1 果真是拿起来就放不下的一本书,故事情节扣人心弦,跌宕起伏。让我所读的都相形失色,问题的关键是作者高罗佩竟然是个荷兰人!!!感觉自己都不是中国人了😳😳
|
258 |
+
1 趋势总是在变化,我们需要找到最根本的东西!历史总会给我些启示
|
259 |
+
0 虽然衩姐很好,但这本书的评论也太虚高了
|
260 |
+
0 #论有些言情直接给一星就好啦# 拉低逼格系列之各种奇葩写手 30=v= P.S. 对 我是真的看过
|
261 |
+
1 纯净的、善良的“我的朋友”。世间唯有一种罪不能被原谅,那就是故意的残忍~
|
262 |
+
1 直到80年代去美国访学,吴才真正接触到了西方经济学,那时他已经50多岁。这一代经济学家的观点主要来自于多年来的亲身体验,肉眼观察,没多少学术成分——因此吴主张求稳、求实际,成为了“改良主义者”,与现在中央决策者中的主流观点一致。
|
263 |
+
1 正在读~
|
264 |
+
0 天雷!
|
265 |
+
0 怎么说呢。。。。相当鸡肋的一本书。。。= = 闪光点不是没有~可是。。。情节还能更老套些吗?世界观还能更陈腐些吗?
|
266 |
+
1 很多很多对话,有些看似不知所云的nonsense,不过我真心喜欢这两个有所关联的中短篇小说。过段时间重读之后要静下心来写篇书评。
|
267 |
+
1 除了德勒兹和福柯外,其他几位的不用看也行
|
268 |
+
1 看的日文版,例子几乎没看————这真的是本科教材吗好可怕wwww
|
269 |
+
1 不错的书,很实用,分解的很详细
|
270 |
+
1 可以用来指导影视赏析,也可以用来观察人生。(得)
|
271 |
+
0 需要慢慢品味去理解!
|
272 |
+
1 正如封面所标榜的那样,丹布朗式的巨著,小说的感觉强了点,分析和介绍整个危机的来龙去脉少了点,不过仍不失为一本关于次贷危机的好书。
|
273 |
+
0 故事编排有点杂乱,快速读完,无感,可能期望太高。
|
274 |
+
0 这绝对不是一本合格的游记或者日记,偏向于作者喃喃自语,文笔欠奉,关于羌塘也没有什么特别的理解,这算啥风格?BBS意识流集结文?这样的超高分评论还说都是喷子,真是醉了。。也许是不认识这位大侠的关系因而无法理解,他人品分也许很高。。PS.有些段落的心理描写也还不错。但重复也太多了。
|
275 |
+
1 黄色封面好评。Les Matins gris de la tolérance --- 是挺难翻的。
|
276 |
+
0 比尔布莱森他用一贯的幽默带着我在澳大利亚走了一圈儿,将旅途中的趣事和这个国家的历史一股脑都倒给我。无奈上海译文的这几个翻译水平太差,已经影响到了阅读,达到令人发指的地步!
|
277 |
+
1 王小帅 骆以军 黄丽群 陈雪 小野 联袂推荐 华语世界实力名家力荐 新生代写作者中头角峥嵘的一支笔 第六届华文世界电影小说奖首奖得主胡迁 迷人、离奇、惊奇、爆裂、令人惊叹的中短篇小说集
|
278 |
+
1 真是好久都没看过这么好看的小说了...
|
279 |
+
1 送给最喜欢的人的童年
|
280 |
+
0 我要是再看穿越就不是人。
|
281 |
+
1 耶鲁公开课里面好像都讲到了这些
|
282 |
+
0 水合靠<五蠹>一站成名,但是其他文太令人失望了。
|
283 |
+
1 提纲挈领
|
284 |
+
0 并不懂豆瓣的评分,感觉好表面,生搬硬套挪来的文字。不如三毛的撒哈拉写的有趣生动。就像是到别人家做客,你觉得你看到了一切,其实也就是你看到的那么一瞬间,但眼睛看到的未必是真实的,你以为的只是你的短暂一瞥,太不深刻,不建议大家购买。。。朋友推荐了夜航西飞,听说很不错,打算入了。
|
285 |
+
1 德勒兹在最后说哲学是干出来的,他就是在用哲学概念的实践来做出一种电影理论,它不是关于电影的理论,因为理论无法取代实践的电影本身,他的抱负是在创造出尽可能多的事物-物件-存在理论,由一通万的理论,万万相透的拓扑理论,他的理论是一座迷宫,不是没有出路,但是遗落的线头一如历史般千头万绪。
|
286 |
+
0 老生常谈
|
287 |
+
1 为了写期末论文匆匆翻过,还真是本好书��!但就是没有我的论述角度╮(╯▽╰)╭
|
288 |
+
1 实话实说,不喜欢前半部分,喜欢后半部分
|
289 |
+
0 吐了
|
290 |
+
1 在封闭空间的小团体内,最丑陋的和最可怕的人性都暴露出来,不再拥有道德和文明。在世界朝着不可预测的方向毁灭之时,萨拉马戈却给了故事一个急刹车——一个荒谬绝伦的收笔。
|
291 |
+
0 给六岁孩子看差不多
|
292 |
+
1 正义必胜的佳作
|
293 |
+
1 大师写大师这么难得了,更何况是安东尼奥尼和文德斯。序和译后也很得我心啊。要把云上的日子再翻出来看看。
|
294 |
+
0 太小白的文果然读不下去,努力到了最后几十页终于放弃了,秒了结局,收摊,over!
|
295 |
+
0 不喜欢这个类型。以为是恐怖的才看的,但感觉主要的是言情,恐怖的部分也不吓人
|
296 |
+
0 忘川,又见忘川,能不能有点创意啊,不失忆会死啊,我真没觉得哪里虐了,虐文才不是这样的,撑死了还是篇狗血文,要不是这是篇古代文没准我就打两星了,我本来还以为古代文怎么样都还能有冷月如霜的水准,匪我思存不愧是狗血文作者的先驱啊
|
297 |
+
0 我觉得这个写法还不错,我跳着看觉得老了的玫瑰比较可爱,不过我实在觉得此女对人生甚部负责,年轻时完全就是为非作歹。另,我好喜欢苏更生这个名字。
|
298 |
+
0 阴谋是有的,不过不可能下这么大的棋。二战后通货膨胀加速倒是事实
|
299 |
+
1 我就知道我们的情绪有重叠
|
300 |
+
0 呃,主角弱智到我认为是作者脑子有问题。其实挺不错的一个题材,不过作者写文时,脑子不知道跑哪去了。
|
301 |
+
0 采用技术分析非常危险,因为历史股价对未来股价没有参考价值,股票市场也不存在趋势。
|
302 |
+
1 以六篇中篇小说为基础,叙述了香港67年到13年..前四篇推理很赞,最后一篇的叙述手法意想不到
|
303 |
+
1 开山鼻祖哎
|
304 |
+
0 在白云機場猶豫了4個小時還是沒買...PDF版看得我希望自己看到的是网友写的版本。
|
305 |
+
1 爱上尼采啦!
|
306 |
+
1 详实历史背景与拉片技术分析,构图调度,长镜头与固定机位,沈从文朱天文与巴赞小津,浮云连缀的电影与文化的中间地带,用“台湾经验”解释侯孝贤的“中国性”。
|
307 |
+
0 勉强看了百分之八实在看不下去了 太墨迹了
|
308 |
+
1 对老五更感兴趣,确实写得好。
|
309 |
+
0 第一本扣人心弦,第二本开始啰嗦,第三本是用400页讲了一句话的故事。感觉头起的野心太大,最后气接不上了。
|
310 |
+
0 先告诉我这是设定集而不是小说可以吗。科幻小说卖设定和推理小说卖诡计一样的,看完让读者感到自己智力值level up了可以鄙视没看过这本书的人就可以了。嘻。anyway,你们开心就好咯
|
311 |
+
0 道理我懂了 但能不能好好说话?
|
312 |
+
1 以前贵金属时代,中国的利率都是很高的,几乎都在100%以上,货币紧缺
|
313 |
+
1 那篇演讲真不错
|
314 |
+
0 两星半,真是……又矫情又作
|
315 |
+
0 2005年4月2日 天津。谢谢ZY
|
316 |
+
0 专业术语都是跳着过,其余的文笔又有点差
|
317 |
+
1 飞机上读完,有几篇催泪。
|
318 |
+
0 大纲式的流水账。
|
319 |
+
1 比电影好看。会说故事的人。
|
320 |
+
1 作者的工科教育背景,使得他们得以从系统论和控制论的角度,全面剖析传统中国千百年来大一统格局的历史演变背后的深层次原因和奥秘。作者对中国政治、历史、文化的综合分析能力和开阔的学术研究视野让人叹服。在改革开放之后中国文科学人的学术性著作里,本书的原创性和开拓性也是首屈一指的。
|
321 |
+
1 内容翔实 非常有趣。|||断断续续三天读完了。强烈推荐!Implications:固步自封与世隔绝只能导致落后和愚昧 这对个人也成立 因此一定要保持与世界的同步;如果书中描述属实 十八九世纪的中国实在是面目可憎 简直让我政治不正确地认为的确需要一些来自外部的改造;但是因为一个国家或民族落后愚昧别的所谓先进国就有权利去征服它了吗?一个国家是否有落后愚昧的权利?我没有得出答案 因为就现在而言我认为最落后愚昧的国家之一就是朝鲜了而我着实很希望美国去打掉金胖的家族企业;的确直到今天中国的知识分子仍沉浸在天朝上国的莫名其妙优越感中 这点我自己也要反省。以前我完全没意识到马葛尔尼来中国的那年暴发了法国大革命 作者经常会插入叙述某事件发生时西方同时进行的事件 这种描述方式我非常喜欢。
|
322 |
+
1 都是大实话 或许好好遵守可以保证骨架不倒
|
323 |
+
1 地图的制图史,其实也是人类对世界认知过程,也反映了政治中心、传播媒介等的变迁,角度很棒,从中了解了很多之前没有注意到的关联,尤其是我们今日明天面对的地图形式,其实只是近两三百年的产物,着实让我震惊。本书缺点:大量的彩页地图插图,有标号,但在正文中却没有索引
|
324 |
+
1 N年前中学时看的书,好怀旧~
|
325 |
+
1 随意的生活和电影的组合
|
326 |
+
0 任何时候,极端分子我都挺讨厌,就算他站在我这边。。因为他不可能永远站在我这边
|
327 |
+
0 不及电子版。编辑去屎。
|
328 |
+
1 嘛嘛,言情部分乏善可陈,就当历史小说看噜w
|
329 |
+
0 不喜欢,读了1/3就读不下去了。感觉好无聊,一件事情可以讲10张纸,没耐心了都。
|
330 |
+
1 帮我搞清楚了城市化过程中农村土地的问题。很好懂。。
|
331 |
+
1 既有精彩的故事,又有旖丽的文字,读起来觉得身轻如燕。
|
332 |
+
0 开头还不错,让我大赞她对新风格的尝试和开拓进取精神。再往后看,发现不过是狗血情节大拼盘。这也大概是言情文的套路:女主必受诸多男主的追捧;深情男主仿佛跟踪狂,了解女主生活的细枝末节,又时不时在女主身边现身嘘寒问暖;可关键时候可能又智商全无的找不到女主,多少年后重逢,不是HE就是沧海桑田。
|
333 |
+
0 文很一般,但是谈书墨实在是极品
|
334 |
+
0 不喜欢女主,大事上不分是非,小事上锱铢必较
|
335 |
+
1 名字很文艺抽象,故事却是一部推理小说
|
336 |
+
1 吴晓波的叙述风格,把人物置于历史大背景之下。
|
337 |
+
1 这本我倒是很喜欢,后半截泼狗血的地方也看得很爽!嗯,果然悬疑是很重要的啊……不对,其实这个女主角我很喜欢,她总让我想起某个温柔沉默的同学。
|
338 |
+
1 有借鉴意义
|
339 |
+
0 笔风清丽,但是文风更显孤单,孤单到有消极的味道,反而并没有觉得的一个人的世界更美好
|
340 |
+
0 书名抄袭 不读也爽快给一星
|
341 |
+
1 太有学习价值了
|
342 |
+
0 瞎鸡巴书!
|
343 |
+
0 太一般 没看完
|
344 |
+
1 可读性强,故事切入点也抓的好,难为作者还花心思构思了若干女性角色。要再回头去重读《明朝那些事》对比一下大势,可能会对人物有更深的理解。
|
345 |
+
1 正如书名,我怕没有机会,选择真正喜欢的生活。发现笔者的想法跟自己好像,也许非常有欲望看这本书的人,都有相同的看法。羡慕笔者可以在对的时间遇见鱼自己频率相同的人。
|
346 |
+
0 书太粗线条了,要是没有这段历史的基础,根本就没法读,要是有这段历史的基础的话,也就没有再读他的必要了,还有就是中信出版社的典型毛病——装帧精美(我就是看到封皮才决定买的),翻译垃圾,句法不通,读的很累,并且有错误,本书为数不多的价值就是书前的地图和书后的皇帝列表,可惜列表还有错误(君士坦丁三世生君士坦丁二世,君士坦丁二世生君士坦丁四世),无语吧😄。同系列的还有两本,维京传奇和诺曼风云,也不想读了,耽误时间。
|
347 |
+
1 旅行不是在田径场里比赛,看谁更高更快更强,而是在超市里购物,每个人都能拿到自己喜欢的东西。
|
348 |
+
0 我觉得很囧,所以弃文了╮(╯_╰)╭
|
349 |
+
0 作者只想着磨字数赚钱了,吃相难看
|
350 |
+
0 还可以吧,8.8不是水军刷出来的难以相信啊
|
351 |
+
0 很台言范,读来很省心轻松,虽有阴谋波折,但最终肯定会是happy ending,长度适中,比较利于放松,文笔还ok。
|
352 |
+
1 看完电影再看小说 真的觉得小说是神作了
|
353 |
+
0 人物怎么就失控了
|
354 |
+
0 纯粹是肤浅的事实罗列,分析既不深入也不专业,论证也不严密,漏洞百出。如果作为一本娱乐书籍,还是有价值的。
|
355 |
+
0 呵呵
|
356 |
+
1 一口气看了下来,知道了我喜欢的电影的创作过程后,真的十分佩服和感动,以写出好的故事为目标,全心全力地前进。
|
357 |
+
1 作者爱看电影的理由 和我一样 有些看电影和认识的观点很新颖 应该是一个热爱生活 电影的人 你一定会在这本书里找的你喜欢的 如果你也爱看电影的话
|
358 |
+
0 看一章。齐。
|
359 |
+
1 图例片例相当多 很细致 不过显得有些啰嗦
|
360 |
+
0 一个垃圾的翻译会毁掉一本书
|
361 |
+
1 作为一本传记、随笔也是很好的,抛开电影而论。
|
362 |
+
1 无疑是看过的最好的奥斯特,我甚至可以百分百肯定奥斯特的老爹是天秤男……
|
363 |
+
0 二星,文章写了三分之二,男一和男二还没碰上,刚打算弃文,又碰上了。男一的身世也就算了,后来莫名给女主加了个身份。最后五分之一实在没法坚持,弃了。不过瞄了一眼结局,又被炸了一下。
|
364 |
+
0 小说最重要是讲好玩的故事好吗?形式大于内容
|
365 |
+
0 我觉得不好看,电视剧也奇丑。
|
366 |
+
0 太矫情了...没什么意思,脑子一热才买了
|
367 |
+
1 以前看兄弟连的时候,看到战场上一大兵居然在看这本书。我想那个时刻他一定会忘记了硝烟和枪声。
|
368 |
+
0 本来是因为关注了微博才在第一时间买的书。很失望。圈钱的书。作为网页上的旅行攻略也勉强算是凑活,作为书,作为建筑师,真是浪费名声了。
|
369 |
+
0 读书最烦两件事,一是絮叨,二是卖弄。这本全赶上了。
|
370 |
+
1 第一次读完弗兰妮,祖伊没读进去,昨天今天读完了。
|
371 |
+
1 挺实用的,虽然教授的内容论数量不多,但好就好在能够通过几个结合了实际操作的小事例激活自己脑海里在其他相关书目中所获得的纯理论知识。像是起到了某种催化剂的作用。稍有遗憾就是如果书中教授的事例再多一些,充分一些就好了。第八章拆析剧本读过后仿佛真有老师给你上了一课。
|
372 |
+
1 从“八月政变”到1991年12月戈尔巴乔夫发表退职演说之间那段时间,是真正改变苏联命运和世界面貌的五个月。
|
373 |
+
1 大师写大师,值得读读。若非托宾,我还不知敬爱的亨利·詹姆斯是gay.
|
374 |
+
0 承认你的好看,但痛恨你的抄袭,最垃圾的还是你的死不认账!特么跟安倍狗有什么区别!
|
375 |
+
0 乱写的
|
376 |
+
1 尼玛的长死了- =
|
377 |
+
0 作者边走边记录沿途感悟,杂乱零碎,加上翻译差,实在看不下去
|
378 |
+
0 女权毒,所有便宜女主都占了,却整不清她何德何能。所有男角都无私奉献,博取女性受众的妄想,即便是言情小说,还是想抓作者大力摇晃叫她醒醒。
|
379 |
+
0 屎!!!!!mb还敢给我推荐!!!!!!不看见了还不恶心,就是一坨屎!!!!!!!!!!二十几大元!!!!吃一坨屎也比这强!!!啊!!!!大头救命啊!!!!
|
380 |
+
0 烂校对+烂插图+烂排版=化金成泥
|
381 |
+
0 虽然比较散碎,但还是很有意思的!
|
382 |
+
1 为啥我看到谁谁谁留在了大陆,就想到这人之后肯定很惨。这本书让我看到那个时代的许多无赖,也让我看到了真正的大师。
|
383 |
+
1 应该是今年最后一本了。
|
384 |
+
0 玛丽苏
|
385 |
+
1 仰慕作者啊,可惜死掉了。
|
386 |
+
1 太好看了吧!!!强烈推荐啊!
|
387 |
+
0 翻译不好,内容也不见得如何。
|
388 |
+
1 硬货!对于电影爱好者来说很开脑洞长知识,但是有点过于专业。
|
389 |
+
0 本来不想有失公允地评分,但是今天唐七居然否认抄袭我实在太气愤了,我要把她所有出版书和衍生作品打一星
|
390 |
+
0 我其实就是脑补窦骁了一下 一般般吧
|
391 |
+
0 应该不是韩父代笔吧,如果一个中年人写出这么一本书,那水平也太渣了。2星。
|
392 |
+
1 这三本看完用了两个多月。这也让出版,,,大师远去再无大师。。。把XX说得像五胡乱华。。。
|
393 |
+
0 为什么一本天雷清穿玛丽苏会被捧这么高?
|
394 |
+
1 很早之前看过的书,让我对这位导演有了更进一步的了解。听他慢慢讲的那些事一段时间很鼓舞我。。。对这条路隐约觉得不容易但内心充满希望。
|
395 |
+
1 引人入胜的情节,意味深长的结尾。
|
396 |
+
1 伟大的写作是双性同体的(打印着读的 )
|
397 |
+
0 翻了两页就读不下去了,内容完全和豆瓣简介不符!!虽然名字就已经暴露了其实就是个口水鸡汤,然而简介却误导我以为是什么经济学社科……
|
398 |
+
0 读到一半,才觉得有意思。
|
399 |
+
1 NEMO推荐
|
400 |
+
0 读完发现读读雪球版导读就够了,并且只需要读其中“学经济家”的那部分就行了。当然,像我这种事先不知道非要读一下全本,当成故事会读读也无可厚非(当然里面也会有不错的知识点,只是我现在对于这类宏观理论直接无视了)。最后,行天丰雄补写的东西,让我觉得我们人行目前的所作所为都是对的啊,比如供给侧、去产能、国企混改、地方政府财政强化、反腐败,他说的我们基本全做了。当然,结局如何,慢慢边走边看吧。
|
401 |
+
1 记得前一阵贵鞋说起这本作品的时候吐槽了一下动机,结合终章的话我倒觉得挺合情合理。逻辑推演非常不错,相当本格的作品啊。
|
402 |
+
0 这本要看
|
403 |
+
1 “陀思妥耶夫斯基是我有所受益的唯一的心理学家:他属于我生命中最美好的幸事之列,甚至超过了对司汤达的发现。这个深刻的人——他十分正确地对肤浅的德国人采取了鄙视的态度……”——尼采
|
404 |
+
0 雷点丛生啊
|
405 |
+
1 除了小武剧本还收录了贾樟柯之前的一部短片剧本,包括一些的采访和文章,有干货,很受教。
|
406 |
+
1 很期待
|
407 |
+
0 桐华没有塑造令人印象深刻的人物的能力,这是我看不下去她小说的一个重要原因
|
408 |
+
0 看到这种书就烦,一丝都没有经济学该有的严谨和审慎
|
409 |
+
0 在写教材么?
|
410 |
+
1 我喜欢黑暗系的故事!
|
411 |
+
0 不是很喜欢,我讨厌这种不疼不痒的书
|
412 |
+
1 感觉漓江出版社的这个系列做得很认真。
|
413 |
+
0 我一直深信宋鸿兵是地摊文学的搬运工,如果他所言的银行家背地里有这么一套,他写了这么多大卖的货币战争,估计早就被银行家搞死了。还会出现站台被打事件吗
|
414 |
+
0 30%弃了
|
415 |
+
1 辛老师在本书中的四个字足以概括其才华与气魄:后出转精。
|
416 |
+
1 看错了了书,借了最初的版本,离考试还剩十天的时候在八楼南发现了这一本,天注定。
|
417 |
+
1 做豆腐的人去做咖喱饭或炸猪排,不可能好吃。
|
418 |
+
1 把我的灯拿来
|
419 |
+
1 还记得这本书是从一位研究生学长处借过来读的
|
420 |
+
0 整本书以观点罗列为主,逻辑问题太多
|
421 |
+
0 APP看的,目录好像是对不上,,难道有错���= -,因为这爱的作的折腾的实在莫名其妙啊
|
422 |
+
0 看到她的天使投资老爸投给她40万开旅馆后,我的梦就不再做下去了。
|
423 |
+
0 不明觉厉
|
424 |
+
1 流畅翔实,但也有些平庸。4.5,立场好像略微偏共
|
425 |
+
1 没有想象的好看
|
426 |
+
1 前半冗长,后半颇可观。
|
427 |
+
0 尝试看了2遍 都弃了。。。
|
428 |
+
1 雨果
|
429 |
+
0 这么幼稚,玛丽苏,小白,没逻辑的小学生作品我是怎么忍受下去得呢?
|
430 |
+
0 逻辑恐怖
|
431 |
+
0 抄袭零分抄袭bl负分
|
432 |
+
0 像是一本博客的合集,其中一部分章节写的还不错
|
433 |
+
0 看过簪中录之后,试着看了侧侧轻寒的其他小说~~可以说都不是特别喜欢,特别是这一本~~我真的是以便鼓励自己一边看完的~脑残偶像剧情节太重了~~完全感觉不出和写簪中录的大大是同一个人!!!总之,非常失望!
|
434 |
+
1 2017重读,挤掉水分很有用,就是太美国了
|
435 |
+
1 从1996年到2008年,这个人对于电影的敬畏和热情好像一直都没有变过。他说,电影是他接近自由的一种方式。喜欢封面选的这张照片,看完书,就觉得贾樟柯平时做的最多的动作可能就是这个。
|
436 |
+
0 这样的书,骂什么脏字都不为过。明年诺奖还不是你的!凑表脸!
|
437 |
+
0 至今每一本大冰的书都看了,也算感谢他当初给我的悸动了。但现在沦为一位在书中为自己辩解和洗白的人,大冰算是很卖力了,整本书重复了好多遍所谓“平行世界”的话,也重复了很多遍“你们要看完才有发言权哦”之类的求人看下去的话。无疑很多人还是很羡慕大冰的生活,包括我,洗脑了吧,这种文字看看就好吧。好吗?好的。
|
438 |
+
1 电影符号学
|
439 |
+
0 国产评分向来不靠谱,这本明显是针对初中生写的超现实(扯淡)推理
|
440 |
+
0 天雷滚滚。真不知道为什么家里的妹子会被这种东西感动。
|
441 |
+
0 小白文,看着耍
|
442 |
+
0 高于1星半。①情节平淡、单薄,人物单薄。女主妈妈得麻风病上岛、女主和姐姐、女主得麻风病上岛、女主治好麻风病下岛、女主姐姐的女儿……越到后面越没耐心越觉得无聊。②除姐姐妹妹部分的其他情节太平淡太单薄,而姐姐妹妹部分更是糟糕、烂大街、狗血、老套:“一不做二不休”的姐妹,姐姐是不做,妹妹是不休。坏心、恶毒、嫉妒心极强、虚荣、爱钱、傍大款的美丽姐姐;真善美哪哪都是好好好没有缺点的妹妹;姐姐抢妹妹男人。姐姐妹妹的情节就只有这些。③女主的2位恋人,前一个是上了几十个处女(招妓就更不用说)的滥交渣男,后一个是比她大20岁的老男人。本来人物就很单薄,留给女主的还那么差。④内容的叙述也就如一开始的“讲故事”的平铺直叙讲述,没有精彩小说该有的令人身临其境的叙述方式。⑤“令整个欧洲潸然泪下……”,真夸张。
|
443 |
+
0 不喜欢这个小说,好像就是历史一样的说教。女主没看到多少自主性,好像就是跟着即使走而已,即使让她爱谁就爱谁。
|
444 |
+
0 读起来太费劲了,前半段耐着性子读了,后半段草草读过。。。
|
445 |
+
0 一流选题,三流内容。论文式写作,文字难有令人灵光乍现的部分。
|
446 |
+
1 我竟然读完了,恩,我要去读第二遍了
|
447 |
+
1 看得我看什么都像类型片……
|
448 |
+
1 以色列真是一片神奇的土地,简直是Nothing is impossible的最好注解呀。反观中国,缺制度、缺文化、缺技能,辽阔疆土却成长不出创新的种子,注定贫瘠、贫乏、毫无生机。。。。杯具
|
449 |
+
1 所有人都能读,所有人都该读,我们上课学的历史是小说。
|
450 |
+
1 一个荒诞离奇匪夷所思的故事。年少时候的张扬与轻狂,往往会变成生命中的错与伤:我们平凡却可贵的人生,错了不会再重来。
|
451 |
+
1 电视剧很好看,没想到小说也那么精彩,堪称经典。
|
452 |
+
1 令人映像深刻的是那些挂着灿烂笑容的中国儿童兵~
|
453 |
+
1 小时候看电影 觉得香港的一切都是好的 可惜我是个悲观的小孩 觉得我一辈子都没有机会去香港 长大了 却跑到了地球的另一半 没有时间去香港 总有一天我会去香港 看看电影里的街头小巷
|
454 |
+
0 除了装帧设计好看,无一好看。作者文笔小学生水平,时常是流水账状态。
|
455 |
+
0 “很黄很暴力,恶心倒胃口。” http://book.douban.com/review/6486508/
|
456 |
+
1 浪漫主义啊,只是,看完后觉得有好多细节并没有解释清楚。哈迪斯那时对安非说的话,让我以为莫凉在曾经的那个神话中也会有一席之地,可是,总觉得有点虎头蛇尾的嫌疑~
|
457 |
+
0 因罗振宇不负责的坑爹点评,再减一星!
|
458 |
+
0 情节不清晰,故事整个编的天花乱坠,钱像扔沙子是的写;那富丽堂皇想什么就有什么。感觉里面的人除了女主都是一个外星人一样,可以看穿人的内心;尼玛,哪有这么简单。这种完美就反衬人物不鲜明,虚得太多。文章也就这样。
|
459 |
+
1 卡尔维诺式的卡尔维诺只能是卡尔维诺
|
460 |
+
1 戴在国内应该算是蛮不错的学者。国内的电影理论圈的众多学者,仅仅还停留在理解国外理论的水平。对于第二符号学来说,国内连理解的学者都没几个。基本只是套用精神分析的术语,解读思路还是自己固有的那一套。戴老师也不能免俗。
|
461 |
+
0 男主傻逼。
|
462 |
+
1 好书,应该去读英文原文,把英语给好好学学。十二个案例的第一个就是2015年的6月的中国股市,
|
463 |
+
1 下午三点,不早不晚
|
464 |
+
1 看起来有点吃力,数学底子不够好。
|
465 |
+
0 看来还是不能盲目轻信友邻安利
|
466 |
+
0 只看了几个故事,总觉得感觉怪怪,这明明是打着丽江旗号的爱情小说集子么,最重要的是,这些小说还写得如此之烂
|
467 |
+
1 可以说很精彩了。
|
468 |
+
1 已购。粉丝向,内容还是有点单薄。异形第一部是小时候看的录像带,几乎要被吓死了,尤其看到抱脸虫,现在还有阴影。。。吓得我后面几部至今没敢看。。。
|
469 |
+
0 啊看不下去
|
470 |
+
0 以中东为中心,描述世界历史。多少有点牵强,中国是个打酱油的角色。翻译不喜欢,勉强看完了。
|
471 |
+
0 日记体 没深度
|
472 |
+
0 中二病可以醒醒了哦。
|
473 |
+
1 真是很有意思的人呢
|
474 |
+
0 无趣、无聊!竟然起了一个这么费正清式的名字,坑爹不浅呀
|
475 |
+
1 听奥胖口无遮拦地谈论各种八卦轶事真是有趣,但看到最后雅格洛后记里孤独的奥胖却让我险些落下泪来。
|
476 |
+
1 本书除了延续了丹·布朗一贯知识丰富和情结紧凑的特点之外,还探讨了人和神,宗教和科学等大命题,绝对经典,看得我拍案惊呼。
|
477 |
+
1 在陕西汉中留坝书房读完,喝着卡布提诺,烤着火,美美的懒懒的,内容就没那么轻松,以色列建国不容易,巴勒斯坦阿拉伯人更不容易,历史造成的悲剧没人能解决,最后恐怕还得历史来解决。
|
478 |
+
1 用诗意的文字来书写血腥内战中的救赎与人性。 好的译笔让人融入其中,荡气回肠。译后记也相当凝练。
|
479 |
+
1 购买正版电子书生涯开始,kindle app在iPad上的阅读体验也甩其他同类app几条街!
|
480 |
+
0 很适合翻拍影视剧,该有的套路都有,就是看不到真情
|
481 |
+
1 看一个喜欢的导演讲自己怎么拍戏总是觉得很有趣,尤其是现在几乎不看DVD,也就失去了看导演讲解音轨的机会。
|
482 |
+
0 突然有一天有人依靠着经济发财了,于是一大堆人跟进发经济财,再后来有个折腾文字的人也去凑经济的热闹
|
483 |
+
0 弃了,读点儿有用的吧,加油
|
484 |
+
0 【这个故事说的是一个无意中穿越的女人犯了一种叫玛丽苏的病经过不懈努力终于把自己活活虐死的故事】妈的我就不总结了……豆友的这句实在是不能更经典了……严格说去掉女猪,只看桐华女士写的二月河九龙夺嫡同人,倒是篇有意思的东西,可惜清穿这题材决定了她玩儿命的把女主插进去并让她影响每件大事的进程,崩坏也就可以想见……
|
485 |
+
1 短经典系列也读了不少了,萨冈本人的故事就很精彩了,而这本集子里所收录的内容也非常有趣,好的短篇小说读完总有意犹未尽之感,那些生活中小小的细节,小小的忧愁,婚姻,两性,出轨,绝症,对于生活的绝望。。。对于任何一个阶级的人都会产生如石头落入池塘中的涟漪吧。很多事别人不觉得有什么大不了,可是敏感的人却会想很多,我觉得萨冈真是把握了很多人们这种心理,没什么波澜起伏的故事,硬生生的却叫人感同身受了。也可能这的确是更适合心思敏感的人看的书吧。
|
486 |
+
0 这对儿没什么大意思。
|
487 |
+
0 嗯 我又重新学习了中央银行的各项职能 书名直接用副标题更好
|
488 |
+
1 波澜壮阔,细致入微。
|
489 |
+
0 没看过.看题目就是个废
|
490 |
+
0 n63不如看原典
|
491 |
+
1 唏嘘不已
|
492 |
+
1 刚开始看的昏昏欲睡,这牛逼也吹的太响当当了,后来小说渐入佳境,倪匡没有忽悠可怜的读者,这绝逼是所谓的终生小说,武侠小说的终极也不过如此了。。。看了九把刀的新书《价值连城的幸运》后面九把刀的访谈,好像他和张大春有点过节,不知道哪位能给我科普一下。。。。。
|
493 |
+
0 总是在重复那几句不痛不痒的话,无味。拓宽知识量看着玩玩。
|
494 |
+
0 不知所言╮( ̄▽ ̄")╭
|
495 |
+
1 还行吧,不错
|
496 |
+
1 本书汇聚了1978年以来中国改革开放带来的巨大变化,经济改革是主线,展现了各类经济体在政策浪潮下的前行,生动刻画了村镇企业、国营企业、个体户、挂靠集体企业的红帽子企业,合资企业的发展和其代表人物,既有政治上的宦海浮沉,又有普通小老百姓精神风貌的变化,实在是一部不可多得的小说!
|
497 |
+
0 无法摆脱的虚假和做作感 越往后越甚
|
498 |
+
1 不错,中间人物关系镜头运用的分析,特别清晰精彩。。。。。。。。。。。。。。。。。。。。。。。。。。。。��。。。。。。。。。。。。。。。。。。。。。。。。。
|
499 |
+
0 天子谋同一作者,但这本没看进去。人设就相当诡异,出场人物暗设看不懂,关系很乱。继续看也许会看懂。
|
500 |
+
1 零散不精炼,但有借鉴意义。
|
501 |
+
0 努力了两次,最终还是跟这书漫长的告别了
|
train/documents/llama.md
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
## Training
|
3 |
+
|
4 |
+
1. Clone the TencentPretrain project and install dependencies: PyTorch, DeepSpeed, SentencePiece
|
5 |
+
|
6 |
+
```
|
7 |
+
git clone https://github.com/Tencent/TencentPretrain.git
|
8 |
+
```
|
9 |
+
|
10 |
+
2. Convert LLaMA-7B weights to TencentPretrain format
|
11 |
+
|
12 |
+
```
|
13 |
+
cd TencentPretrain
|
14 |
+
|
15 |
+
python3 scripts/convert_llama_to_tencentpretrain.py --input_model_path $LLaMA_7B_FOLDER/consolidated.00.pth --output_model_path models/llama-7b.bin --layers_num 32
|
16 |
+
```
|
17 |
+
|
18 |
+
3. Modify configuration file
|
19 |
+
|
20 |
+
Check out the `tencentpretrain/utils/constants.py` file, and modify L4: `special_tokens_map.json` to `llama_special_tokens_map.json`
|
21 |
+
|
22 |
+
4. Data preprocessing. We use the example corpus in the project for pre-training; one can also train on custom data in the same format (one sample per line).
|
23 |
+
|
24 |
+
```
|
25 |
+
python3 preprocess.py --corpus_path corpora/book_review.txt --spm_model_path $LLaMA_7B_FOLDER/tokenizer.model \
|
26 |
+
--dataset_path dataset.pt --processes_num 8 --data_processor lm
|
27 |
+
```
|
28 |
+
|
29 |
+
5. Start training.
|
30 |
+
|
31 |
+
```
|
32 |
+
deepspeed pretrain.py --deepspeed --deepspeed_config models/deepspeed_config.json \
|
33 |
+
--pretrained_model_path models/llama-7b.bin \
|
34 |
+
--dataset_path dataset.pt --spm_model_path $LLaMA_7B_FOLDER/tokenizer.model \
|
35 |
+
--config_path models/llama/7b_config.json \
|
36 |
+
--output_model_path models/output_model.bin \
|
37 |
+
--world_size 8 --learning_rate 1e-4 \
|
38 |
+
--data_processor lm --total_steps 10000 --save_checkpoint_steps 2000 --batch_size 24
|
39 |
+
```
|
40 |
+
|
41 |
+
## Inference
|
42 |
+
|
43 |
+
Similar to facebookresearch/llama, TencentPretrain also provides language model inference code.
|
44 |
+
For example, using a single GPU for LLaMA-7B inference, the prompt is in the file `beginning.txt`:
|
45 |
+
|
46 |
+
```
|
47 |
+
python3 scripts/generate_lm.py --load_model_path models/llama-7b.bin --spm_model_path $LLaMA_7B_FOLDER/tokenizer.model \
|
48 |
+
--test_path beginning.txt --prediction_path generated_sentence.txt \
|
49 |
+
--config_path models/llama/7b_config.json
|
50 |
+
```
|
51 |
+
|
52 |
+
For now, TencentPretrain only supports LLaMA-7B training. We are working on our framework to support LLaMA model training/fine-tuning at all scales and will share more experimental results.
|
train/finetune/run_c3.py
ADDED
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for C3 (a multiple choice dataset).
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import argparse
|
7 |
+
import json
|
8 |
+
import random
|
9 |
+
import torch
|
10 |
+
import torch.nn as nn
|
11 |
+
|
12 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
13 |
+
sys.path.append(tencentpretrain_dir)
|
14 |
+
|
15 |
+
from tencentpretrain.embeddings import *
|
16 |
+
from tencentpretrain.encoders import *
|
17 |
+
from tencentpretrain.utils.constants import *
|
18 |
+
from tencentpretrain.utils import *
|
19 |
+
from tencentpretrain.utils.optimizers import *
|
20 |
+
from tencentpretrain.utils.config import load_hyperparam
|
21 |
+
from tencentpretrain.utils.seed import set_seed
|
22 |
+
from tencentpretrain.utils.logging import init_logger
|
23 |
+
from tencentpretrain.model_saver import save_model
|
24 |
+
from tencentpretrain.opts import finetune_opts, tokenizer_opts, adv_opts
|
25 |
+
from finetune.run_classifier import build_optimizer, load_or_initialize_parameters, train_model, batch_loader, evaluate
|
26 |
+
|
27 |
+
|
28 |
+
class MultipleChoice(nn.Module):
    """Multiple-choice model: each candidate answer is encoded jointly with its
    question/context as an independent sequence, scored by a single linear unit,
    and the per-choice scores are compared with a softmax over the choice axis.
    """

    def __init__(self, args):
        super(MultipleChoice, self).__init__()
        self.embedding = Embedding(args)
        # Compose the embedding from every embedding type listed in args.embedding.
        for embedding_name in args.embedding:
            tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(tmp_emb, embedding_name)
        self.encoder = str2encoder[args.encoder](args)
        self.dropout = nn.Dropout(args.dropout)
        # One scalar score per encoded (sample, choice) sequence; scores are
        # regrouped into [batch, choices] in forward().
        self.output_layer = nn.Linear(args.hidden_size, 1)

    def forward(self, src, tgt, seg, soft_tgt=None):
        """
        Args:
            src: [batch_size x choices_num x seq_length]
            tgt: [batch_size]
            seg: [batch_size x choices_num x seq_length]

        Returns:
            (loss, logits) when tgt is given, else (None, logits); logits has
            shape [batch_size x choices_num].

        Note: soft_tgt is accepted for interface compatibility with sibling
        classifiers but is not used here.
        """

        choices_num = src.shape[1]

        # Flatten the choice dimension so every (sample, choice) pair is
        # encoded as an independent sequence: [batch * choices, seq_length].
        src = src.view(-1, src.size(-1))
        seg = seg.view(-1, seg.size(-1))

        # Embedding.
        emb = self.embedding(src, seg)
        # Encoder.
        output = self.encoder(emb, seg)
        output = self.dropout(output)
        # Score each sequence from its first position (the [CLS]-style token).
        logits = self.output_layer(output[:, 0, :])
        # Regroup per-sequence scores back into per-sample rows.
        reshaped_logits = logits.view(-1, choices_num)

        if tgt is not None:
            loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(reshaped_logits), tgt.view(-1))
            return loss, reshaped_logits
        else:
            return None, reshaped_logits
|
65 |
+
|
66 |
+
|
67 |
+
def read_dataset(args, path):
    """Read a C3-style JSON file and convert it to model-ready features.

    Each raw entry is [context_sentences, questions]; every question carries a
    "choice" list and (for labeled splits) an "answer" string. Choices are
    padded with the literal string "No Answer" up to args.max_choices_num.

    Returns:
        A list of (src_list, tgt, seg_list) tuples, where src_list/seg_list
        hold one padded id/segment sequence per choice and tgt is the index of
        the gold choice (0 when no answer matches, e.g. on unlabeled data).
    """
    with open(path, mode="r", encoding="utf-8") as f:
        data = json.load(f)

    examples = []
    for i in range(len(data)):
        for j in range(len(data[i][1])):
            # example layout: [context, question, choice_0 .. choice_{max-1}, answer]
            example = ["\n".join(data[i][0]).lower(), data[i][1][j]["question"].lower()]
            for k in range(len(data[i][1][j]["choice"])):
                example += [data[i][1][j]["choice"][k].lower()]
            for k in range(len(data[i][1][j]["choice"]), args.max_choices_num):
                example += ["No Answer"]

            example += [data[i][1][j].get("answer", "").lower()]

            examples += [example]

    dataset = []
    # Loop-invariant: the PAD id does not depend on the example; compute once.
    PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
    for i, example in enumerate(examples):
        # The gold answer sits right after the padded choice list, i.e. at
        # index 2 + max_choices_num (context and question occupy indices 0-1).
        # Bug fix: the previous code hard-coded index 6, which is only correct
        # for the default max_choices_num == 4.
        answer = example[2 + args.max_choices_num]
        tgt = 0
        for k in range(args.max_choices_num):
            if example[2 + k] == answer:
                tgt = k
        dataset.append(([], tgt, []))

        for k in range(args.max_choices_num):
            # Segment 1: [CLS] choice [SEP] question [SEP]; segment 2: context [SEP].
            src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(example[k + 2]) + [SEP_TOKEN])
            src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(example[1]) + [SEP_TOKEN])
            src_c = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(example[0]) + [SEP_TOKEN])

            src = src_a + src_b + src_c
            seg = [1] * (len(src_a) + len(src_b)) + [2] * len(src_c)

            # Truncate then pad to the fixed sequence length; padding uses
            # segment id 0 so it is ignored by the model.
            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)

            dataset[-1][0].append(src)
            dataset[-1][2].append(seg)

    return dataset
|
114 |
+
|
115 |
+
|
116 |
+
def main():
    """Fine-tune and evaluate a multiple-choice model on the C3 dataset.

    Workflow: parse options, build tokenizer and model, train for
    args.epochs_num epochs while checkpointing the best dev accuracy, then
    (optionally) reload the best checkpoint and evaluate on the test set.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    # Typo fix in user-facing help text ("cadicate answer" -> "candidate answers").
    parser.add_argument("--max_choices_num", default=4, type=int,
                        help="The maximum number of candidate answers; entries shorter than this will be padded.")

    tokenizer_opts(parser)

    adv_opts(parser)

    args = parser.parse_args()
    # Each choice corresponds to one output label.
    args.labels_num = args.max_choices_num

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build multiple choice model.
    model = MultipleChoice(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    # Total optimizer steps, used by the LR scheduler for warmup/decay.
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    if args.use_adv:
        args.adv_method = str2adv[args.adv_type](model)

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        # Reshuffle and re-tensorize the training set each epoch.
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt = torch.LongTensor([example[1] for example in trainset])
        seg = torch.LongTensor([example[2] for example in trainset])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):

            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()

            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Checkpoint only when dev accuracy (result[0]) improves.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)

    # Evaluation phase: reload the best checkpoint before testing.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            # DataParallel wraps the real model under .module.
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))


if __name__ == "__main__":
    main()
|
train/finetune/run_chid.py
ADDED
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for ChID (a multiple choice dataset).
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import argparse
|
7 |
+
import json
|
8 |
+
import random
|
9 |
+
import torch
|
10 |
+
|
11 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
12 |
+
sys.path.append(tencentpretrain_dir)
|
13 |
+
|
14 |
+
from tencentpretrain.utils.constants import *
|
15 |
+
from tencentpretrain.utils.tokenizers import *
|
16 |
+
from tencentpretrain.utils.optimizers import *
|
17 |
+
from tencentpretrain.utils import *
|
18 |
+
from tencentpretrain.utils.config import load_hyperparam
|
19 |
+
from tencentpretrain.utils.seed import set_seed
|
20 |
+
from tencentpretrain.utils.logging import init_logger
|
21 |
+
from tencentpretrain.model_saver import save_model
|
22 |
+
from tencentpretrain.opts import finetune_opts, adv_opts
|
23 |
+
from finetune.run_c3 import MultipleChoice
|
24 |
+
from finetune.run_classifier import build_optimizer, load_or_initialize_parameters, train_model, batch_loader, evaluate
|
25 |
+
|
26 |
+
|
27 |
+
def tokenize_chid(text):
    """Split a ChID context string into single characters, keeping each
    13-character idiom placeholder (e.g. "#idiom000001#") as one token.

    Returns a list whose elements are either individual characters or whole
    placeholder tags, in original order.
    """
    pieces = []
    # Locate the first placeholder, emit everything before it char-by-char,
    # then the tag itself as a single unit.
    cursor = text.find("#idiom")
    pieces.extend(text[:cursor])
    pieces.append(text[cursor:cursor + 13])
    prev = cursor
    # Walk the remaining placeholders left to right.
    while text[cursor + 1:].find("#idiom") != -1:
        cursor = cursor + 1 + text[cursor + 1:].find("#idiom")
        pieces.extend(text[prev + 13:cursor])
        pieces.append(text[cursor:cursor + 13])
        prev = cursor
    # Trailing characters after the last placeholder.
    pieces.extend(text[prev + 13:])
    return pieces
|
48 |
+
|
49 |
+
|
50 |
+
def add_tokens_around(tokens, idiom_index, tokens_num):
    """Select up to tokens_num context tokens around position idiom_index.

    The budget is split roughly in half (left gets the smaller share for odd
    budgets); when one side runs short, the unused budget is given to the
    other side. Returns (left_tokens, right_tokens); the token at idiom_index
    itself is excluded.
    """
    n_left = tokens_num // 2
    n_right = tokens_num - n_left

    has_enough_left = idiom_index >= n_left
    has_enough_right = (len(tokens) - 1 - idiom_index) >= n_right

    if has_enough_left and has_enough_right:
        # Both sides can supply their full share.
        left = tokens[idiom_index - n_left:idiom_index]
        right = tokens[idiom_index + 1:idiom_index + 1 + n_right]
    elif not has_enough_left:
        # Left side is short: take everything on the left, spend the rest on the right.
        left = tokens[:idiom_index]
        right = tokens[idiom_index + 1:idiom_index + 1 + tokens_num - len(left)]
    else:
        # Right side is short: take everything on the right, spend the rest on the left.
        right = tokens[idiom_index + 1:]
        left = tokens[idiom_index - (tokens_num - len(right)):idiom_index]

    return left, right
|
65 |
+
|
66 |
+
|
67 |
+
def read_dataset(args, data_path, answer_path):
    """Read a ChID-style JSONL file and build one multiple-choice instance per
    idiom placeholder.

    Each line has "candidates" (the shared option list) and "content" (context
    strings containing "#idiomNNNNNN#" tags). For every tag, the surrounding
    context is truncated to fit the sequence length, other tags in the window
    are masked, and each candidate is encoded against that window.

    Returns a list of (src_list, tgt, seg_list, tag, group_index) tuples;
    tgt is -1 when answer_path is None (unlabeled data).
    """
    if answer_path is not None:
        answers = json.load(open(answer_path))
    dataset = []
    # Reserve 3 positions for [CLS] and separator tokens; one more is reserved
    # below (max_tokens_for_doc - 1) for the extra separator after the option.
    max_tokens_for_doc = args.seq_length - 3
    group_index = 0

    for line in open(data_path, mode="r", encoding="utf-8"):
        example = json.loads(line)
        options = example["candidates"]
        for context in example["content"]:
            chid_tokens = tokenize_chid(context)
            # Every placeholder in this context yields one instance.
            tags = [token for token in chid_tokens if "#idiom" in token]
            for tag in tags:
                if answer_path is not None:
                    tgt = answers[tag]
                else:
                    tgt = -1  # unlabeled split
                # Re-tokenize: placeholders stay atomic, plain chars go through
                # the tokenizer (may emit several sub-tokens each).
                tokens = []
                for i, token in enumerate(chid_tokens):
                    if "#idiom" in token:
                        sub_tokens = [str(token)]
                    else:
                        sub_tokens = args.tokenizer.tokenize(token)
                    for sub_token in sub_tokens:
                        tokens.append(sub_token)
                idiom_index = tokens.index(tag)
                left_tokens, right_tokens = add_tokens_around(tokens, idiom_index, max_tokens_for_doc - 1)

                # Mask every OTHER placeholder in the window so only the
                # current blank is visible to the model.
                for i in range(len(left_tokens)):
                    if "#idiom" in left_tokens[i] and left_tokens[i] != tag:
                        left_tokens[i] = MASK_TOKEN
                for i in range(len(right_tokens)):
                    if "#idiom" in right_tokens[i] and right_tokens[i] != tag:
                        right_tokens[i] = MASK_TOKEN

                dataset.append(([], tgt, [], tag, group_index))

                # One encoded sequence per candidate option:
                # [CLS] option [SEP] left-context [SEP] right-context [SEP]
                for option in options:
                    option_tokens = args.tokenizer.tokenize(option)
                    tokens = [CLS_TOKEN] + option_tokens + [SEP_TOKEN] + left_tokens + [SEP_TOKEN] + right_tokens + [SEP_TOKEN]

                    src = args.tokenizer.convert_tokens_to_ids(tokens)[: args.seq_length]
                    seg = [0] * len(src)

                    # Pad to the fixed length; id 0 is used as padding here.
                    while len(src) < args.seq_length:
                        src.append(0)
                        seg.append(0)

                    dataset[-1][0].append(src)
                    dataset[-1][2].append(seg)

                # Pad the choice list itself with all-zero sequences so every
                # instance exposes exactly max_choices_num candidates.
                while len(dataset[-1][0]) < args.max_choices_num:
                    dataset[-1][0].append([0] * args.seq_length)
                    dataset[-1][2].append([0] * args.seq_length)
                group_index += 1

    return dataset
|
125 |
+
|
126 |
+
|
127 |
+
def main():
    """Fine-tune a multiple-choice model on the ChID idiom-cloze dataset.

    Workflow: parse options, build a character tokenizer and the
    MultipleChoice model (shared with run_c3), train for args.epochs_num
    epochs, and checkpoint whenever dev accuracy improves. Unlike run_c3,
    there is no separate test-set evaluation phase here.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    parser.add_argument("--vocab_path", default=None, type=str,
                        help="Path of the vocabulary file.")
    parser.add_argument("--spm_model_path", default=None, type=str,
                        help="Path of the sentence piece model.")
    # ChID stores labels in separate answer files keyed by placeholder tag.
    parser.add_argument("--train_answer_path", type=str, required=True,
                        help="Path of the answers for trainset.")
    parser.add_argument("--dev_answer_path", type=str, required=True,
                        help="Path of the answers for devset.")

    parser.add_argument("--max_choices_num", default=10, type=int,
                        help="The maximum number of cadicate answer, shorter than this will be padded.")

    adv_opts(parser)

    args = parser.parse_args()

    # Each candidate idiom corresponds to one output label.
    args.labels_num = args.max_choices_num

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Build tokenizer. ChID contexts are processed character by character.
    args.tokenizer = CharTokenizer(args)

    # Build multiple choice model.
    model = MultipleChoice(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path, args.train_answer_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    # Total optimizer steps, used by the LR scheduler for warmup/decay.
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    if args.use_adv:
        args.adv_method = str2adv[args.adv_type](model)

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        # Reshuffle and re-tensorize the training set each epoch.
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt = torch.LongTensor([example[1] for example in trainset])
        seg = torch.LongTensor([example[2] for example in trainset])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):

            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()

            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Checkpoint only when dev accuracy (result[0]) improves.
        result = evaluate(args, read_dataset(args, args.dev_path, args.dev_answer_path))
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)


if __name__ == "__main__":
    main()
|
train/finetune/run_classifier.py
ADDED
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for classification.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
import torch.nn as nn
|
10 |
+
|
11 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
12 |
+
sys.path.append(tencentpretrain_dir)
|
13 |
+
|
14 |
+
from tencentpretrain.embeddings import *
|
15 |
+
from tencentpretrain.encoders import *
|
16 |
+
from tencentpretrain.utils.vocab import Vocab
|
17 |
+
from tencentpretrain.utils.constants import *
|
18 |
+
from tencentpretrain.utils import *
|
19 |
+
from tencentpretrain.utils.optimizers import *
|
20 |
+
from tencentpretrain.utils.config import load_hyperparam
|
21 |
+
from tencentpretrain.utils.seed import set_seed
|
22 |
+
from tencentpretrain.utils.logging import init_logger
|
23 |
+
from tencentpretrain.utils.misc import pooling
|
24 |
+
from tencentpretrain.model_saver import save_model
|
25 |
+
from tencentpretrain.opts import finetune_opts, tokenizer_opts, adv_opts
|
26 |
+
|
27 |
+
|
28 |
+
class Classifier(nn.Module):
    """Sequence classification head: embedding + encoder + pooled two-layer MLP."""

    def __init__(self, args):
        super(Classifier, self).__init__()
        # Compose the embedding from the embedding names listed in args.embedding,
        # each looked up in the project's str2embedding registry.
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(tmp_emb, embedding_name)
        # Encoder selected by name from the project's str2encoder registry.
        self.encoder = str2encoder[args.encoder](args)
        self.labels_num = args.labels_num
        self.pooling_type = args.pooling
        # Knowledge-distillation settings: mix a soft-target MSE loss
        # with the hard-label NLL loss (see forward()).
        self.soft_targets = args.soft_targets
        self.soft_alpha = args.soft_alpha
        self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size)
        self.output_layer_2 = nn.Linear(args.hidden_size, self.labels_num)

    def forward(self, src, tgt, seg, soft_tgt=None):
        """
        Args:
            src: [batch_size x seq_length]
            tgt: [batch_size]
            seg: [batch_size x seq_length]
            soft_tgt: optional teacher logits for distillation.

        Returns:
            (loss, logits) when tgt is given, otherwise (None, logits).
        """
        # Embedding.
        emb = self.embedding(src, seg)
        # Encoder.
        output = self.encoder(emb, seg)
        # Target.
        output = pooling(output, seg, self.pooling_type)
        output = torch.tanh(self.output_layer_1(output))
        logits = self.output_layer_2(output)
        if tgt is not None:
            if self.soft_targets and soft_tgt is not None:
                # Weighted sum of distillation loss (MSE to teacher logits)
                # and hard-label negative log-likelihood.
                loss = self.soft_alpha * nn.MSELoss()(logits, soft_tgt) + \
                       (1 - self.soft_alpha) * nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1))
            else:
                loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1))
            return loss, logits
        else:
            return None, logits
|
67 |
+
|
68 |
+
|
69 |
+
def count_labels_num(path):
    """Return the number of distinct integer labels in the 'label' column of a TSV file.

    The first line of the file is treated as a header naming the columns.
    """
    labels = set()
    columns = {}
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, raw in enumerate(f):
            fields = raw.rstrip("\r\n").split("\t")
            if line_id == 0:
                # Header: map column names to their positions.
                columns = {name: idx for idx, name in enumerate(fields)}
            else:
                labels.add(int(fields[columns["label"]]))
    return len(labels)
|
81 |
+
|
82 |
+
|
83 |
+
def load_or_initialize_parameters(args, model):
    """Load weights from args.pretrained_model_path, or randomly initialize them.

    The checkpoint is loaded non-strictly because pretrained checkpoints
    typically lack the classification head.
    """
    if args.pretrained_model_path is not None:
        # Initialize with pretrained model.
        state = torch.load(args.pretrained_model_path, map_location="cpu")
        model.load_state_dict(state, strict=False)
        return
    # N(0, 0.02) init for everything except LayerNorm gain/bias
    # (parameters whose names contain "gamma" or "beta").
    for name, param in model.named_parameters():
        if "gamma" in name or "beta" in name:
            continue
        param.data.normal_(0, 0.02)
|
92 |
+
|
93 |
+
|
94 |
+
def build_optimizer(args, model):
    """Create the optimizer and LR scheduler configured in args.

    Weight decay (0.01) is applied to all parameters except biases and
    LayerNorm gain/bias ("gamma"/"beta"), which get decay 0.
    """
    no_decay = ("bias", "gamma", "beta")
    decay_params, plain_params = [], []
    for name, param in model.named_parameters():
        if any(marker in name for marker in no_decay):
            plain_params.append(param)
        else:
            decay_params.append(param)
    grouped_parameters = [
        {"params": decay_params, "weight_decay": 0.01},
        {"params": plain_params, "weight_decay": 0.0},
    ]

    if args.optimizer == "adamw":
        optimizer = str2optimizer[args.optimizer](grouped_parameters, lr=args.learning_rate, correct_bias=False)
    else:
        # Non-AdamW optimizers (e.g. Adafactor) take these extra flags.
        optimizer = str2optimizer[args.optimizer](grouped_parameters, lr=args.learning_rate,
                                                  scale_parameter=False, relative_step=False)

    warmup_steps = args.train_steps * args.warmup
    if args.scheduler == "constant":
        scheduler = str2scheduler[args.scheduler](optimizer)
    elif args.scheduler == "constant_with_warmup":
        scheduler = str2scheduler[args.scheduler](optimizer, warmup_steps)
    else:
        scheduler = str2scheduler[args.scheduler](optimizer, warmup_steps, args.train_steps)
    return optimizer, scheduler
|
113 |
+
|
114 |
+
|
115 |
+
def batch_loader(batch_size, src, tgt, seg, soft_tgt=None):
    """Yield mini-batches (src, tgt, seg, soft_tgt); the last batch may be smaller.

    The soft_tgt element of each tuple is None when no soft targets were supplied.
    """
    instances_num = src.size()[0]
    full_batches = instances_num // batch_size
    for i in range(full_batches):
        lo, hi = i * batch_size, (i + 1) * batch_size
        yield (src[lo:hi, :],
               tgt[lo:hi],
               seg[lo:hi, :],
               soft_tgt[lo:hi, :] if soft_tgt is not None else None)
    # Trailing partial batch, if the instance count is not a multiple
    # of batch_size.
    tail = full_batches * batch_size
    if instances_num > tail:
        yield (src[tail:, :],
               tgt[tail:],
               seg[tail:, :],
               soft_tgt[tail:, :] if soft_tgt is not None else None)
|
135 |
+
|
136 |
+
|
137 |
+
def read_dataset(args, path):
    """Read a TSV dataset and return a list of (src, tgt, seg[, soft_tgt]) examples.

    The first line is a header; the presence of a "text_b" column switches the
    format to sentence-pair classification. Token id sequences are truncated and
    padded to args.seq_length.
    """
    dataset, columns = [], {}
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header line: map column names to indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            tgt = int(line[columns["label"]])
            if args.soft_targets and "logits" in columns.keys():
                # Teacher logits for knowledge distillation, space-separated.
                soft_tgt = [float(value) for value in line[columns["logits"]].split(" ")]
            if "text_b" not in columns:  # Sentence classification.
                text_a = line[columns["text_a"]]
                src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
                seg = [1] * len(src)
            else:  # Sentence-pair classification.
                text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
                src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
                src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
                src = src_a + src_b
                # Segment ids 1/2 distinguish the two sentences.
                seg = [1] * len(src_a) + [2] * len(src_b)

            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]
            PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)  # Segment id 0 marks padding.
            if args.soft_targets and "logits" in columns.keys():
                dataset.append((src, tgt, seg, soft_tgt))
            else:
                dataset.append((src, tgt, seg))

    return dataset
|
173 |
+
|
174 |
+
|
175 |
+
def train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch=None):
    """Run one optimization step on a batch and return the (unscaled) loss.

    Supports apex fp16 loss scaling and FGM/PGD adversarial training when the
    corresponding flags are set on args.
    """
    model.zero_grad()

    src_batch = src_batch.to(args.device)
    tgt_batch = tgt_batch.to(args.device)
    seg_batch = seg_batch.to(args.device)
    if soft_tgt_batch is not None:
        soft_tgt_batch = soft_tgt_batch.to(args.device)

    loss, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch)
    if torch.cuda.device_count() > 1:
        # DataParallel returns one loss per GPU; reduce to a scalar.
        loss = torch.mean(loss)

    if args.fp16:
        # apex amp scales the loss to avoid fp16 gradient underflow.
        with args.amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    if args.use_adv and args.adv_type == "fgm":
        # FGM: perturb embeddings once, accumulate adversarial gradients, restore.
        args.adv_method.attack(epsilon=args.fgm_epsilon)
        loss_adv, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch)
        if torch.cuda.device_count() > 1:
            loss_adv = torch.mean(loss_adv)
        loss_adv.backward()
        args.adv_method.restore()

    if args.use_adv and args.adv_type == "pgd":
        # PGD: K projected perturbation steps; intermediate gradients are
        # discarded and the backed-up gradients are restored on the last step.
        K = args.pgd_k
        args.adv_method.backup_grad()
        for t in range(K):
            # apply the perturbation to embedding
            args.adv_method.attack(epsilon=args.pgd_epsilon, alpha=args.pgd_alpha,
                                   is_first_attack=(t == 0))
            if t != K - 1:
                model.zero_grad()
            else:
                args.adv_method.restore_grad()
            loss_adv, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch)
            if torch.cuda.device_count() > 1:
                loss_adv = torch.mean(loss_adv)
            loss_adv.backward()
        args.adv_method.restore()

    optimizer.step()
    scheduler.step()

    return loss
|
223 |
+
|
224 |
+
|
225 |
+
def evaluate(args, dataset, print_confusion_matrix=False):
    """Evaluate args.model on a dataset and return (accuracy, confusion_matrix).

    Args:
        args: Namespace providing model, device, batch_size, labels_num, logger.
        dataset: List of (src, tgt, seg[, soft_tgt]) examples from read_dataset.
        print_confusion_matrix: Accepted for backward/forward compatibility with
            callers that pass an explicit flag (e.g. run_classifier_deepspeed.py
            calls evaluate(args, dataset, True), which raised TypeError against
            the old two-argument signature). The confusion matrix is logged
            regardless, preserving the previous behavior.

    Returns:
        Tuple of (accuracy as float, confusion matrix tensor where rows are
        predictions and columns are gold labels).
    """
    src = torch.LongTensor([sample[0] for sample in dataset])
    tgt = torch.LongTensor([sample[1] for sample in dataset])
    seg = torch.LongTensor([sample[2] for sample in dataset])

    batch_size = args.batch_size

    correct = 0
    # Confusion matrix: confusion[pred, gold].
    confusion = torch.zeros(args.labels_num, args.labels_num, dtype=torch.long)

    args.model.eval()

    for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
        src_batch = src_batch.to(args.device)
        tgt_batch = tgt_batch.to(args.device)
        seg_batch = seg_batch.to(args.device)
        with torch.no_grad():
            _, logits = args.model(src_batch, tgt_batch, seg_batch)
        pred = torch.argmax(nn.Softmax(dim=1)(logits), dim=1)
        gold = tgt_batch
        for j in range(pred.size()[0]):
            confusion[pred[j], gold[j]] += 1
        correct += torch.sum(pred == gold).item()

    args.logger.info("Confusion matrix:")
    args.logger.info(confusion)
    args.logger.info("Report precision, recall, and f1:")

    # eps avoids division by zero for labels absent from predictions or gold.
    eps = 1e-9
    for i in range(confusion.size()[0]):
        p = confusion[i, i].item() / (confusion[i, :].sum().item() + eps)
        r = confusion[i, i].item() / (confusion[:, i].sum().item() + eps)
        f1 = 2 * p * r / (p + r + eps)
        args.logger.info("Label {}: {:.3f}, {:.3f}, {:.3f}".format(i, p, r, f1))

    args.logger.info("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct / len(dataset), correct, len(dataset)))
    return correct / len(dataset), confusion
|
263 |
+
|
264 |
+
|
265 |
+
def main():
    """Command-line entry point: parse options, fine-tune, and evaluate."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    tokenizer_opts(parser)

    parser.add_argument("--soft_targets", action='store_true',
                        help="Train model with logits.")
    parser.add_argument("--soft_alpha", type=float, default=0.5,
                        help="Weight of the soft targets loss.")

    adv_opts(parser)

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)
    # Count the number of labels.
    args.labels_num = count_labels_num(args.train_path)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)
    set_seed(args.seed)

    # Build classification model.
    model = Classifier(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))
    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    if args.use_adv:
        args.adv_method = str2adv[args.adv_type](model)

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")
    for epoch in range(1, args.epochs_num + 1):
        # Reshuffle and re-tensorize the training set each epoch.
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt = torch.LongTensor([example[1] for example in trainset])
        seg = torch.LongTensor([example[2] for example in trainset])
        if args.soft_targets:
            soft_tgt = torch.FloatTensor([example[3] for example in trainset])
        else:
            soft_tgt = None

        model.train()
        for i, (src_batch, tgt_batch, seg_batch, soft_tgt_batch) in enumerate(batch_loader(batch_size, src, tgt, seg, soft_tgt)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep the checkpoint with the best dev accuracy.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)

    # Evaluation phase.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            # DataParallel wraps the model; load weights into the inner module.
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))


if __name__ == "__main__":
    main()
|
train/finetune/run_classifier_cv.py
ADDED
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for classification with cross validation.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch.nn as nn
|
9 |
+
import numpy as np
|
10 |
+
|
11 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
12 |
+
sys.path.append(tencentpretrain_dir)
|
13 |
+
|
14 |
+
from tencentpretrain.utils.constants import *
|
15 |
+
from tencentpretrain.utils import *
|
16 |
+
from tencentpretrain.utils.optimizers import *
|
17 |
+
from tencentpretrain.utils.config import load_hyperparam
|
18 |
+
from tencentpretrain.utils.seed import set_seed
|
19 |
+
from tencentpretrain.model_saver import save_model
|
20 |
+
from tencentpretrain.opts import *
|
21 |
+
from finetune.run_classifier import *
|
22 |
+
|
23 |
+
|
24 |
+
def main():
    """K-fold cross-validation fine-tuning.

    Saves one model per fold and the out-of-fold class probabilities
    (for ensemble stacking) to args.train_features_path.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Path options.
    parser.add_argument("--pretrained_model_path", default=None, type=str,
                        help="Path of the pretrained model.")
    parser.add_argument("--output_model_path", default="models/classifier_model.bin", type=str,
                        help="Path of the output model.")
    parser.add_argument("--train_path", type=str, required=True,
                        help="Path of the trainset.")
    parser.add_argument("--config_path", default="models/bert/base_config.json", type=str,
                        help="Path of the config file.")
    parser.add_argument("--train_features_path", type=str, required=True,
                        help="Path of the train features for stacking.")

    # Model options.
    model_opts(parser)

    # Tokenizer options.
    tokenizer_opts(parser)

    # Optimization options.
    optimization_opts(parser)
    parser.add_argument("--soft_targets", action='store_true',
                        help="Train model with logits.")
    parser.add_argument("--soft_alpha", type=float, default=0.5,
                        help="Weight of the soft targets loss.")

    # Training options.
    training_opts(parser)

    # Cross validation options.
    parser.add_argument("--folds_num", type=int, default=5,
                        help="The number of folds for cross validation.")

    adv_opts(parser)

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    # Get logger.
    args.logger = init_logger(args)

    set_seed(args.seed)

    # Count the number of labels.
    args.labels_num = count_labels_num(args.train_path)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Training phase.
    dataset = read_dataset(args, args.train_path)
    instances_num = len(dataset)
    batch_size = args.batch_size
    instances_num_per_fold = instances_num // args.folds_num + 1

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    train_features = []

    total_loss, result = 0.0, 0.0
    acc, marco_f1 = 0.0, 0.0

    for fold_id in range(args.folds_num):
        # Build classification model (fresh weights for every fold).
        model = Classifier(args)

        args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = model.to(args.device)
        load_or_initialize_parameters(args, model)
        optimizer, scheduler = build_optimizer(args, model)
        if args.fp16:
            try:
                from apex import amp
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
            args.amp = amp
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        args.model = model

        if args.use_adv:
            args.adv_method = str2adv[args.adv_type](model)

        # Hold out one contiguous slice as the dev fold; train on the rest.
        trainset = dataset[0 : fold_id * instances_num_per_fold] + dataset[(fold_id + 1) * instances_num_per_fold :]

        devset = dataset[fold_id * instances_num_per_fold : (fold_id + 1) * instances_num_per_fold]

        dev_src = torch.LongTensor([example[0] for example in devset])
        dev_tgt = torch.LongTensor([example[1] for example in devset])
        dev_seg = torch.LongTensor([example[2] for example in devset])
        dev_soft_tgt = None

        for epoch in range(1, args.epochs_num + 1):
            random.shuffle(trainset)

            train_src = torch.LongTensor([example[0] for example in trainset])
            train_tgt = torch.LongTensor([example[1] for example in trainset])
            train_seg = torch.LongTensor([example[2] for example in trainset])

            if args.soft_targets:
                train_soft_tgt = torch.FloatTensor([example[3] for example in trainset])
            else:
                train_soft_tgt = None

            model.train()
            for i, (src_batch, tgt_batch, seg_batch, soft_tgt_batch) in enumerate(batch_loader(batch_size, train_src, train_tgt, train_seg, train_soft_tgt)):
                loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch)
                total_loss += loss.item()
                if (i + 1) % args.report_steps == 0:
                    args.logger.info("Fold id: {}, Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(fold_id, epoch, i + 1, total_loss / args.report_steps))
                    total_loss = 0.0

        # Collect the trained model's probabilities on the held-out fold
        # (the out-of-fold features for stacking).
        model.eval()
        for i, (src_batch, tgt_batch, seg_batch, soft_tgt_batch) in enumerate(batch_loader(batch_size, dev_src, dev_tgt, dev_seg, dev_soft_tgt)):
            src_batch = src_batch.to(args.device)
            seg_batch = seg_batch.to(args.device)
            with torch.no_grad():
                _, logits = model(src_batch, None, seg_batch)
            prob = nn.Softmax(dim=1)(logits)
            prob = prob.cpu().numpy().tolist()
            train_features.extend(prob)

        # Save this fold's model as <name>-fold_<id>.<suffix>.
        output_model_name = ".".join(args.output_model_path.split(".")[:-1])
        output_model_suffix = args.output_model_path.split(".")[-1]
        save_model(model, output_model_name + "-fold_" + str(fold_id) + "." + output_model_suffix)
        result = evaluate(args, devset)
        acc += result[0] / args.folds_num
        f1 = []
        confusion = result[1]
        eps = 1e-9
        for i in range(confusion.size()[0]):
            p = confusion[i, i].item() / (confusion[i, :].sum().item() + eps)
            r = confusion[i, i].item() / (confusion[:, i].sum().item() + eps)
            f1.append(2 * p * r / (p + r + eps))

        marco_f1 += sum(f1) / len(f1) / args.folds_num

    train_features = np.array(train_features)
    np.save(args.train_features_path, train_features)
    args.logger.info("Acc. : {:.4f}".format(acc))
    args.logger.info("Marco F1 : {:.4f}".format(marco_f1))


if __name__ == "__main__":
    main()
|
train/finetune/run_classifier_deepspeed.py
ADDED
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to use DeepSpeed for classification.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
import torch.nn as nn
|
10 |
+
import deepspeed
|
11 |
+
import torch.distributed as dist
|
12 |
+
|
13 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
14 |
+
sys.path.append(tencentpretrain_dir)
|
15 |
+
|
16 |
+
from tencentpretrain.opts import deepspeed_opts
|
17 |
+
from finetune.run_classifier import *
|
18 |
+
|
19 |
+
|
20 |
+
def read_dataset(args, path, split):
    """Read a TSV dataset; with split=True, shard the examples across ranks.

    With split=True the return value is a list of args.world_size example
    lists (one shard per rank), assigned round-robin and padded to equal
    length; with split=False it is a flat example list, as in
    run_classifier.read_dataset.
    """
    dataset, columns = [], {}
    if split:
        # One sub-list per rank.
        for i in range(args.world_size):
            dataset.append([])
        index = 0
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header line: map column names to indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            tgt = int(line[columns["label"]])
            if args.soft_targets and "logits" in columns.keys():
                # Teacher logits for knowledge distillation, space-separated.
                soft_tgt = [float(value) for value in line[columns["logits"]].split(" ")]
            if "text_b" not in columns:  # Sentence classification.
                text_a = line[columns["text_a"]]
                src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
                seg = [1] * len(src)
            else:  # Sentence-pair classification.
                text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
                src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
                src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
                src = src_a + src_b
                seg = [1] * len(src_a) + [2] * len(src_b)

            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]
            PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)
            if split:
                # Round-robin assignment of examples to rank shards.
                if args.soft_targets and "logits" in columns.keys():
                    dataset[index].append((src, tgt, seg, soft_tgt))
                else:
                    dataset[index].append((src, tgt, seg))
                index += 1
                if index == args.world_size:
                    index = 0
            else:
                if args.soft_targets and "logits" in columns.keys():
                    dataset.append((src, tgt, seg, soft_tgt))
                else:
                    dataset.append((src, tgt, seg))
    if split:
        # Pad shorter shards by duplicating the last example of the largest
        # shard, so every rank holds the same number of instances.
        max_data_num_rank_index = 0
        max_data_num = len(dataset[0])
        for i in range(args.world_size):
            if len(dataset[i]) > max_data_num:
                max_data_num_rank_index = i
                max_data_num = len(dataset[i])
        for i in range(args.world_size):
            if len(dataset[i]) < max_data_num:
                dataset[i].append(dataset[max_data_num_rank_index][-1])

    return dataset
|
79 |
+
|
80 |
+
|
81 |
+
def train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch=None):
    """Run one DeepSpeed training step on a batch and return the loss.

    Unlike run_classifier.train_model, backward and step go through the
    DeepSpeed engine (model.backward / model.step), which handles loss
    scaling and gradient accumulation internally.
    """
    model.zero_grad()

    src_batch = src_batch.to(args.device)
    tgt_batch = tgt_batch.to(args.device)
    seg_batch = seg_batch.to(args.device)
    if soft_tgt_batch is not None:
        soft_tgt_batch = soft_tgt_batch.to(args.device)

    loss, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch)
    if torch.cuda.device_count() > 1:
        # NOTE(review): under DeepSpeed each process usually holds a single
        # replica, so a per-GPU loss vector is not expected here — confirm
        # whether this reduce is ever exercised.
        loss = torch.mean(loss)

    model.backward(loss)

    model.step()

    return loss
|
99 |
+
|
100 |
+
|
101 |
+
def main():
    """DeepSpeed distributed fine-tuning entry point."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    parser.add_argument("--world_size", type=int, default=1,
                        help="Total number of processes (GPUs) for training.")

    tokenizer_opts(parser)

    parser.add_argument("--soft_targets", action='store_true',
                        help="Train model with logits.")
    parser.add_argument("--soft_alpha", type=float, default=0.5,
                        help="Weight of the soft targets loss.")

    deepspeed_opts(parser)

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Count the number of labels.
    args.labels_num = count_labels_num(args.train_path)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model.
    model = Classifier(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    # Group parameters: no weight decay for bias and LayerNorm gain/bias.
    param_optimizer = list(model.named_parameters())
    no_decay = ["bias", "gamma", "beta"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": 0.01},
        {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]

    deepspeed.init_distributed()
    rank = dist.get_rank()
    args.rank = rank

    # Each rank trains only on its own shard of the training data.
    trainset = read_dataset(args, args.train_path, split=True)[args.rank]
    random.shuffle(trainset)
    instances_num = len(trainset)
    batch_size = args.batch_size
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    custom_optimizer, custom_scheduler = build_optimizer(args, model)

    model, optimizer, _, scheduler = deepspeed.initialize(
        model=model,
        model_parameters=optimizer_grouped_parameters,
        args=args,
        optimizer=custom_optimizer,
        lr_scheduler=custom_scheduler,
        mpu=None,
        dist_init_required=False)

    src = torch.LongTensor([example[0] for example in trainset])
    tgt = torch.LongTensor([example[1] for example in trainset])
    seg = torch.LongTensor([example[2] for example in trainset])
    if args.soft_targets:
        soft_tgt = torch.FloatTensor([example[3] for example in trainset])
    else:
        soft_tgt = None

    args.model = model
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    total_loss, result, best_result, best_epoch = 0.0, 0.0, 0.0, 0

    result_tensor = torch.tensor(result).to(args.device)
    if args.rank == 0:
        args.logger.info("Batch size: {}".format(batch_size))
        args.logger.info("The number of training instances: {}".format(instances_num))
        args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        model.train()
        for i, (src_batch, tgt_batch, seg_batch, soft_tgt_batch) in enumerate(batch_loader(batch_size, src, tgt, seg, soft_tgt)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0 and args.rank == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0
        # Rank 0 evaluates on dev; the accuracy is broadcast so every rank
        # agrees on whether to checkpoint this epoch.
        if args.rank == 0:
            result = evaluate(args, read_dataset(args, args.dev_path, split=False))
            result_tensor = torch.tensor(result[0]).to(args.device)
        dist.broadcast(result_tensor, 0, async_op=False)
        if result_tensor.float() >= best_result:
            best_result = result_tensor.float().item()
            best_epoch = epoch
            model.save_checkpoint(args.output_model_path, str(epoch))

    # Evaluation phase.
    if args.test_path is not None and args.rank == 0:
        args.logger.info("Test set evaluation.")
        model.load_checkpoint(args.output_model_path, str(best_epoch))
        # NOTE(review): the imported evaluate() is defined with two parameters
        # in run_classifier.py; this call passes a third argument — verify the
        # evaluate signature accepts a print-confusion flag.
        evaluate(args, read_dataset(args, args.test_path, split=False), True)


if __name__ == "__main__":
    main()
|
train/finetune/run_classifier_grid.py
ADDED
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for classification with grid search.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import torch
|
7 |
+
import random
|
8 |
+
import argparse
|
9 |
+
from itertools import product
|
10 |
+
|
11 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
12 |
+
sys.path.append(tencentpretrain_dir)
|
13 |
+
|
14 |
+
from tencentpretrain.opts import *
|
15 |
+
from finetune.run_classifier import *
|
16 |
+
|
17 |
+
|
18 |
+
def main():
|
19 |
+
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
20 |
+
|
21 |
+
finetune_opts(parser)
|
22 |
+
|
23 |
+
tokenizer_opts(parser)
|
24 |
+
|
25 |
+
parser.add_argument("--soft_targets", action='store_true',
|
26 |
+
help="Train model with logits.")
|
27 |
+
parser.add_argument("--soft_alpha", type=float, default=0.5,
|
28 |
+
help="Weight of the soft targets loss.")
|
29 |
+
|
30 |
+
parser.add_argument("--batch_size_list", default=[32, 64], nargs='+', type=int,
|
31 |
+
help="A list of batch sizes for grid search.")
|
32 |
+
parser.add_argument("--learning_rate_list", type=float, default=[3e-5, 1e-4, 3e-4], nargs='+',
|
33 |
+
help="A list of learning rate for grid search.")
|
34 |
+
parser.add_argument("--epochs_num_list", type=int, default=[3, 5, 8], nargs='+',
|
35 |
+
help="A list of number of epochs for grid search.")
|
36 |
+
|
37 |
+
adv_opts(parser)
|
38 |
+
|
39 |
+
args = parser.parse_args()
|
40 |
+
|
41 |
+
# Load the hyperparameters from the config file.
|
42 |
+
args = load_hyperparam(args)
|
43 |
+
|
44 |
+
set_seed(args.seed)
|
45 |
+
|
46 |
+
# Count the number of labels.
|
47 |
+
args.labels_num = count_labels_num(args.train_path)
|
48 |
+
|
49 |
+
# Build tokenizer.
|
50 |
+
args.tokenizer = str2tokenizer[args.tokenizer](args)
|
51 |
+
|
52 |
+
# Get logger.
|
53 |
+
args.logger = init_logger(args)
|
54 |
+
|
55 |
+
best_acc = 0
|
56 |
+
config = {}
|
57 |
+
|
58 |
+
# Build dataset
|
59 |
+
trainset = read_dataset(args, args.train_path)
|
60 |
+
instances_num = len(trainset)
|
61 |
+
|
62 |
+
for batch_size, learning_rate, epochs_num in product(args.batch_size_list, args.learning_rate_list, args.epochs_num_list):
|
63 |
+
|
64 |
+
args.learning_rate = learning_rate
|
65 |
+
args.batch_size = batch_size
|
66 |
+
args.epochs_num = epochs_num
|
67 |
+
|
68 |
+
args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1
|
69 |
+
|
70 |
+
# Build classification model.
|
71 |
+
model = Classifier(args)
|
72 |
+
|
73 |
+
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
74 |
+
model = model.to(args.device)
|
75 |
+
load_or_initialize_parameters(args, model)
|
76 |
+
optimizer, scheduler = build_optimizer(args, model)
|
77 |
+
if args.fp16:
|
78 |
+
try:
|
79 |
+
from apex import amp
|
80 |
+
except ImportError:
|
81 |
+
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
|
82 |
+
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
|
83 |
+
args.amp = amp
|
84 |
+
if torch.cuda.device_count() > 1:
|
85 |
+
model = torch.nn.DataParallel(model)
|
86 |
+
args.model = model
|
87 |
+
|
88 |
+
if args.use_adv:
|
89 |
+
args.adv_method = str2adv[args.adv_type](model)
|
90 |
+
|
91 |
+
# Training phase.
|
92 |
+
|
93 |
+
total_loss, _, _ = 0., 0., 0.
|
94 |
+
|
95 |
+
for _ in range(1, args.epochs_num + 1):
|
96 |
+
random.shuffle(trainset)
|
97 |
+
src = torch.LongTensor([example[0] for example in trainset])
|
98 |
+
tgt = torch.LongTensor([example[1] for example in trainset])
|
99 |
+
seg = torch.LongTensor([example[2] for example in trainset])
|
100 |
+
if args.soft_targets:
|
101 |
+
soft_tgt = torch.FloatTensor([example[3] for example in trainset])
|
102 |
+
else:
|
103 |
+
soft_tgt = None
|
104 |
+
|
105 |
+
model.train()
|
106 |
+
for i, (src_batch, tgt_batch, seg_batch, soft_tgt_batch) in enumerate(batch_loader(batch_size, src, tgt, seg, soft_tgt)):
|
107 |
+
_ = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch)
|
108 |
+
|
109 |
+
acc, _ = evaluate(args, read_dataset(args, args.dev_path))
|
110 |
+
|
111 |
+
if acc > best_acc:
|
112 |
+
best_acc = acc
|
113 |
+
config = {"learning_rate": learning_rate, "batch_size": batch_size, "epochs_num": epochs_num}
|
114 |
+
args.logger.info("On configuration: {}.\n".format(config))
|
115 |
+
|
116 |
+
args.logger.info("Best Acc. is: {:.4f}, on configuration {}.".format(best_acc, config))
|
117 |
+
|
118 |
+
|
119 |
+
if __name__ == "__main__":
|
120 |
+
main()
|
train/finetune/run_classifier_mt.py
ADDED
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for multi-task classification.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
import torch.nn as nn
|
10 |
+
|
11 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
12 |
+
sys.path.append(tencentpretrain_dir)
|
13 |
+
|
14 |
+
from tencentpretrain.embeddings import *
|
15 |
+
from tencentpretrain.encoders import *
|
16 |
+
from tencentpretrain.utils.constants import *
|
17 |
+
from tencentpretrain.utils import *
|
18 |
+
from tencentpretrain.utils.optimizers import *
|
19 |
+
from tencentpretrain.utils.config import load_hyperparam
|
20 |
+
from tencentpretrain.utils.seed import set_seed
|
21 |
+
from tencentpretrain.utils.logging import init_logger
|
22 |
+
from tencentpretrain.utils.misc import pooling
|
23 |
+
from tencentpretrain.model_saver import save_model
|
24 |
+
from tencentpretrain.opts import *
|
25 |
+
from finetune.run_classifier import count_labels_num, batch_loader, build_optimizer, load_or_initialize_parameters, train_model, read_dataset, evaluate
|
26 |
+
|
27 |
+
|
28 |
+
class MultitaskClassifier(nn.Module):
|
29 |
+
def __init__(self, args):
|
30 |
+
super(MultitaskClassifier, self).__init__()
|
31 |
+
self.embedding = Embedding(args)
|
32 |
+
for embedding_name in args.embedding:
|
33 |
+
tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
|
34 |
+
self.embedding.update(tmp_emb, embedding_name)
|
35 |
+
self.encoder = str2encoder[args.encoder](args)
|
36 |
+
self.pooling_type = args.pooling
|
37 |
+
self.output_layers_1 = nn.ModuleList([nn.Linear(args.hidden_size, args.hidden_size) for _ in args.labels_num_list])
|
38 |
+
self.output_layers_2 = nn.ModuleList([nn.Linear(args.hidden_size, labels_num) for labels_num in args.labels_num_list])
|
39 |
+
|
40 |
+
self.dataset_id = 0
|
41 |
+
|
42 |
+
def forward(self, src, tgt, seg, soft_tgt=None):
|
43 |
+
"""
|
44 |
+
Args:
|
45 |
+
src: [batch_size x seq_length]
|
46 |
+
tgt: [batch_size]
|
47 |
+
seg: [batch_size x seq_length]
|
48 |
+
"""
|
49 |
+
# Embedding.
|
50 |
+
emb = self.embedding(src, seg)
|
51 |
+
# Encoder.
|
52 |
+
output = self.encoder(emb, seg)
|
53 |
+
# Target.
|
54 |
+
output = pooling(output, seg, self.pooling_type)
|
55 |
+
output = torch.tanh(self.output_layers_1[self.dataset_id](output))
|
56 |
+
logits = self.output_layers_2[self.dataset_id](output)
|
57 |
+
if tgt is not None:
|
58 |
+
loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1))
|
59 |
+
return loss, logits
|
60 |
+
else:
|
61 |
+
return None, logits
|
62 |
+
|
63 |
+
def change_dataset(self, dataset_id):
|
64 |
+
self.dataset_id = dataset_id
|
65 |
+
|
66 |
+
|
67 |
+
def pack_dataset(dataset, dataset_id, batch_size):
|
68 |
+
packed_dataset = []
|
69 |
+
src_batch, tgt_batch, seg_batch = [], [], []
|
70 |
+
for i, sample in enumerate(dataset):
|
71 |
+
src_batch.append(sample[0])
|
72 |
+
tgt_batch.append(sample[1])
|
73 |
+
seg_batch.append(sample[2])
|
74 |
+
if (i + 1) % batch_size == 0:
|
75 |
+
packed_dataset.append((dataset_id, torch.LongTensor(src_batch), torch.LongTensor(tgt_batch), torch.LongTensor(seg_batch)))
|
76 |
+
src_batch, tgt_batch, seg_batch = [], [], []
|
77 |
+
continue
|
78 |
+
if len(src_batch) > 0:
|
79 |
+
packed_dataset.append((dataset_id, torch.LongTensor(src_batch), torch.LongTensor(tgt_batch), torch.LongTensor(seg_batch)))
|
80 |
+
|
81 |
+
return packed_dataset
|
82 |
+
|
83 |
+
|
84 |
+
def main():
|
85 |
+
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
86 |
+
|
87 |
+
# Path options.
|
88 |
+
parser.add_argument("--pretrained_model_path", default=None, type=str,
|
89 |
+
help="Path of the pretrained model.")
|
90 |
+
parser.add_argument("--dataset_path_list", default=[], nargs='+', type=str, help="Dataset path list.")
|
91 |
+
parser.add_argument("--output_model_path", default="models/multitask_classifier_model.bin", type=str,
|
92 |
+
help="Path of the output model.")
|
93 |
+
parser.add_argument("--config_path", default="models/bert/base_config.json", type=str,
|
94 |
+
help="Path of the config file.")
|
95 |
+
|
96 |
+
# Model options.
|
97 |
+
model_opts(parser)
|
98 |
+
|
99 |
+
# Tokenizer options.
|
100 |
+
tokenizer_opts(parser)
|
101 |
+
|
102 |
+
# Optimizer options.
|
103 |
+
optimization_opts(parser)
|
104 |
+
|
105 |
+
# Training options.
|
106 |
+
training_opts(parser)
|
107 |
+
|
108 |
+
adv_opts(parser)
|
109 |
+
|
110 |
+
args = parser.parse_args()
|
111 |
+
|
112 |
+
args.soft_targets = False
|
113 |
+
|
114 |
+
# Load the hyperparameters from the config file.
|
115 |
+
args = load_hyperparam(args)
|
116 |
+
|
117 |
+
set_seed(args.seed)
|
118 |
+
|
119 |
+
# Count the number of labels.
|
120 |
+
args.labels_num_list = [count_labels_num(os.path.join(path, "train.tsv")) for path in args.dataset_path_list]
|
121 |
+
|
122 |
+
args.datasets_num = len(args.dataset_path_list)
|
123 |
+
|
124 |
+
# Build tokenizer.
|
125 |
+
args.tokenizer = str2tokenizer[args.tokenizer](args)
|
126 |
+
|
127 |
+
# Build multi-task classification model.
|
128 |
+
model = MultitaskClassifier(args)
|
129 |
+
|
130 |
+
# Load or initialize parameters.
|
131 |
+
load_or_initialize_parameters(args, model)
|
132 |
+
|
133 |
+
# Get logger.
|
134 |
+
args.logger = init_logger(args)
|
135 |
+
|
136 |
+
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
137 |
+
model = model.to(args.device)
|
138 |
+
args.model = model
|
139 |
+
|
140 |
+
if args.use_adv:
|
141 |
+
args.adv_method = str2adv[args.adv_type](model)
|
142 |
+
|
143 |
+
# Training phase.
|
144 |
+
dataset_list = [read_dataset(args, os.path.join(path, "train.tsv")) for path in args.dataset_path_list]
|
145 |
+
packed_dataset_list = [pack_dataset(dataset, i, args.batch_size) for i, dataset in enumerate(dataset_list)]
|
146 |
+
|
147 |
+
packed_dataset_all = []
|
148 |
+
for packed_dataset in packed_dataset_list:
|
149 |
+
packed_dataset_all += packed_dataset
|
150 |
+
|
151 |
+
instances_num = sum([len(dataset) for dataset in dataset_list])
|
152 |
+
batch_size = args.batch_size
|
153 |
+
|
154 |
+
args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1
|
155 |
+
|
156 |
+
args.logger.info("Batch size: {}".format(batch_size))
|
157 |
+
args.logger.info("The number of training instances: {}".format(instances_num))
|
158 |
+
|
159 |
+
optimizer, scheduler = build_optimizer(args, model)
|
160 |
+
|
161 |
+
if args.fp16:
|
162 |
+
try:
|
163 |
+
from apex import amp
|
164 |
+
except ImportError:
|
165 |
+
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
|
166 |
+
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
|
167 |
+
args.amp = amp
|
168 |
+
|
169 |
+
if torch.cuda.device_count() > 1:
|
170 |
+
args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
|
171 |
+
model = torch.nn.DataParallel(model)
|
172 |
+
|
173 |
+
total_loss, result, best_result = 0.0, 0.0, 0.0
|
174 |
+
|
175 |
+
args.logger.info("Start training.")
|
176 |
+
|
177 |
+
for epoch in range(1, args.epochs_num + 1):
|
178 |
+
random.shuffle(packed_dataset_all)
|
179 |
+
model.train()
|
180 |
+
for i, (dataset_id, src_batch, tgt_batch, seg_batch) in enumerate(packed_dataset_all):
|
181 |
+
if hasattr(model, "module"):
|
182 |
+
model.module.change_dataset(dataset_id)
|
183 |
+
else:
|
184 |
+
model.change_dataset(dataset_id)
|
185 |
+
loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, None)
|
186 |
+
total_loss += loss.item()
|
187 |
+
if (i + 1) % args.report_steps == 0:
|
188 |
+
args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
|
189 |
+
total_loss = 0.0
|
190 |
+
|
191 |
+
for dataset_id, path in enumerate(args.dataset_path_list):
|
192 |
+
args.labels_num = args.labels_num_list[dataset_id]
|
193 |
+
if hasattr(model, "module"):
|
194 |
+
model.module.change_dataset(dataset_id)
|
195 |
+
else:
|
196 |
+
model.change_dataset(dataset_id)
|
197 |
+
result = evaluate(args, read_dataset(args, os.path.join(path, "dev.tsv")))
|
198 |
+
|
199 |
+
save_model(model, args.output_model_path)
|
200 |
+
|
201 |
+
|
202 |
+
if __name__ == "__main__":
|
203 |
+
main()
|
train/finetune/run_classifier_multi_label.py
ADDED
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for multi-label classification.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
import torch.nn as nn
|
10 |
+
import time
|
11 |
+
import datetime
|
12 |
+
import json
|
13 |
+
|
14 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
15 |
+
sys.path.append(tencentpretrain_dir)
|
16 |
+
|
17 |
+
from tencentpretrain.embeddings import *
|
18 |
+
from tencentpretrain.encoders import *
|
19 |
+
from tencentpretrain.utils.vocab import Vocab
|
20 |
+
from tencentpretrain.utils.constants import *
|
21 |
+
from tencentpretrain.utils import *
|
22 |
+
from tencentpretrain.utils.optimizers import *
|
23 |
+
from tencentpretrain.utils.config import load_hyperparam
|
24 |
+
from tencentpretrain.utils.seed import set_seed
|
25 |
+
from tencentpretrain.utils.logging import init_logger
|
26 |
+
from tencentpretrain.utils.misc import pooling
|
27 |
+
from tencentpretrain.model_saver import save_model
|
28 |
+
from tencentpretrain.opts import finetune_opts, tokenizer_opts, adv_opts
|
29 |
+
from finetune.run_classifier import load_or_initialize_parameters, build_optimizer, batch_loader
|
30 |
+
|
31 |
+
|
32 |
+
class MultilabelClassifier(nn.Module):
|
33 |
+
def __init__(self, args):
|
34 |
+
super(MultilabelClassifier, self).__init__()
|
35 |
+
self.embedding = Embedding(args)
|
36 |
+
for embedding_name in args.embedding:
|
37 |
+
tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
|
38 |
+
self.embedding.update(tmp_emb, embedding_name)
|
39 |
+
self.encoder = str2encoder[args.encoder](args)
|
40 |
+
self.labels_num = args.labels_num
|
41 |
+
self.pooling_type = args.pooling
|
42 |
+
self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size)
|
43 |
+
self.output_layer_2 = nn.Linear(args.hidden_size, self.labels_num)
|
44 |
+
|
45 |
+
def forward(self, src, tgt, seg):
|
46 |
+
"""
|
47 |
+
Args:
|
48 |
+
src: [batch_size x seq_length]
|
49 |
+
tgt: [batch_size]
|
50 |
+
seg: [batch_size x seq_length]
|
51 |
+
"""
|
52 |
+
# Embedding.
|
53 |
+
emb = self.embedding(src, seg)
|
54 |
+
# Encoder.
|
55 |
+
output = self.encoder(emb, seg)
|
56 |
+
# Target.
|
57 |
+
output = pooling(output, seg, self.pooling_type)
|
58 |
+
output = torch.tanh(self.output_layer_1(output))
|
59 |
+
logits = self.output_layer_2(output)
|
60 |
+
if tgt is not None:
|
61 |
+
probs_batch = nn.Sigmoid()(logits)
|
62 |
+
loss = nn.BCELoss()(probs_batch, tgt)
|
63 |
+
return loss, logits
|
64 |
+
else:
|
65 |
+
return None, logits
|
66 |
+
|
67 |
+
|
68 |
+
def count_labels_num(path):
|
69 |
+
labels_set, columns = set(), {}
|
70 |
+
with open(path, mode="r", encoding="utf-8") as f:
|
71 |
+
for line_id, line in enumerate(f):
|
72 |
+
if line_id == 0:
|
73 |
+
for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
|
74 |
+
columns[column_name] = i
|
75 |
+
continue
|
76 |
+
line = line.rstrip("\r\n").split("\t")
|
77 |
+
label = set(line[columns["label"]].split(","))
|
78 |
+
labels_set |= label
|
79 |
+
return len(labels_set)
|
80 |
+
|
81 |
+
|
82 |
+
def read_dataset(args, path):
|
83 |
+
dataset, columns = [], {}
|
84 |
+
with open(path, mode="r", encoding="utf-8") as f:
|
85 |
+
for line_id, line in enumerate(f):
|
86 |
+
if line_id == 0:
|
87 |
+
for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
|
88 |
+
columns[column_name] = i
|
89 |
+
continue
|
90 |
+
line = line.rstrip("\r\n").split("\t")
|
91 |
+
tgt = [0] * args.labels_num
|
92 |
+
for idx in [int(_) for _ in line[columns["label"]].split(",")]:
|
93 |
+
tgt[idx] = 1
|
94 |
+
if "text_b" not in columns: # Sentence classification.
|
95 |
+
text_a = line[columns["text_a"]]
|
96 |
+
src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
|
97 |
+
seg = [1] * len(src)
|
98 |
+
else: # Sentence-pair classification.
|
99 |
+
text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
|
100 |
+
src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
|
101 |
+
src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
|
102 |
+
src = src_a + src_b
|
103 |
+
seg = [1] * len(src_a) + [2] * len(src_b)
|
104 |
+
|
105 |
+
if len(src) > args.seq_length:
|
106 |
+
src = src[: args.seq_length]
|
107 |
+
seg = seg[: args.seq_length]
|
108 |
+
PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
|
109 |
+
while len(src) < args.seq_length:
|
110 |
+
src.append(PAD_ID)
|
111 |
+
seg.append(0)
|
112 |
+
|
113 |
+
dataset.append((src, tgt, seg))
|
114 |
+
|
115 |
+
return dataset
|
116 |
+
|
117 |
+
|
118 |
+
def train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch):
|
119 |
+
model.zero_grad()
|
120 |
+
|
121 |
+
src_batch = src_batch.to(args.device)
|
122 |
+
tgt_batch = tgt_batch.to(args.device)
|
123 |
+
seg_batch = seg_batch.to(args.device)
|
124 |
+
|
125 |
+
loss, _ = model(src_batch, tgt_batch, seg_batch)
|
126 |
+
if torch.cuda.device_count() > 1:
|
127 |
+
loss = torch.mean(loss)
|
128 |
+
|
129 |
+
if args.fp16:
|
130 |
+
with args.amp.scale_loss(loss, optimizer) as scaled_loss:
|
131 |
+
scaled_loss.backward()
|
132 |
+
else:
|
133 |
+
loss.backward()
|
134 |
+
|
135 |
+
if args.use_adv and args.adv_type == "fgm":
|
136 |
+
args.adv_method.attack(epsilon=args.fgm_epsilon)
|
137 |
+
loss_adv, _ = model(src_batch, tgt_batch, seg_batch)
|
138 |
+
if torch.cuda.device_count() > 1:
|
139 |
+
loss_adv = torch.mean(loss_adv)
|
140 |
+
loss_adv.backward()
|
141 |
+
args.adv_method.restore()
|
142 |
+
|
143 |
+
if args.use_adv and args.adv_type == "pgd":
|
144 |
+
K = args.pgd_k
|
145 |
+
args.adv_method.backup_grad()
|
146 |
+
for t in range(K):
|
147 |
+
# apply the perturbation to embedding
|
148 |
+
args.adv_method.attack(epsilon=args.pgd_epsilon, alpha=args.pgd_alpha,
|
149 |
+
is_first_attack=(t == 0))
|
150 |
+
if t != K - 1:
|
151 |
+
model.zero_grad()
|
152 |
+
else:
|
153 |
+
args.adv_method.restore_grad()
|
154 |
+
loss_adv, _ = model(src_batch, tgt_batch, seg_batch)
|
155 |
+
if torch.cuda.device_count() > 1:
|
156 |
+
loss_adv = torch.mean(loss_adv)
|
157 |
+
loss_adv.backward()
|
158 |
+
args.adv_method.restore()
|
159 |
+
|
160 |
+
optimizer.step()
|
161 |
+
scheduler.step()
|
162 |
+
|
163 |
+
return loss
|
164 |
+
|
165 |
+
|
166 |
+
def evaluate(args, dataset):
|
167 |
+
src = torch.LongTensor([sample[0] for sample in dataset])
|
168 |
+
tgt = torch.tensor([sample[1] for sample in dataset], dtype=torch.float)
|
169 |
+
seg = torch.LongTensor([sample[2] for sample in dataset])
|
170 |
+
|
171 |
+
batch_size = args.batch_size
|
172 |
+
|
173 |
+
correct = 0
|
174 |
+
|
175 |
+
args.model.eval()
|
176 |
+
|
177 |
+
for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
|
178 |
+
src_batch = src_batch.to(args.device)
|
179 |
+
tgt_batch = tgt_batch.to(args.device)
|
180 |
+
seg_batch = seg_batch.to(args.device)
|
181 |
+
with torch.no_grad():
|
182 |
+
_, logits = args.model(src_batch, tgt_batch, seg_batch)
|
183 |
+
probs_batch = nn.Sigmoid()(logits)
|
184 |
+
predict_label_batch = (probs_batch > 0.5).float()
|
185 |
+
gold = tgt_batch
|
186 |
+
|
187 |
+
for k in range(len(predict_label_batch)):
|
188 |
+
correct += predict_label_batch[k].equal(gold[k])
|
189 |
+
|
190 |
+
args.logger.info("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct / len(dataset), correct, len(dataset)))
|
191 |
+
return correct / len(dataset)
|
192 |
+
|
193 |
+
|
194 |
+
def main():
|
195 |
+
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
196 |
+
|
197 |
+
finetune_opts(parser)
|
198 |
+
|
199 |
+
tokenizer_opts(parser)
|
200 |
+
|
201 |
+
adv_opts(parser)
|
202 |
+
|
203 |
+
args = parser.parse_args()
|
204 |
+
|
205 |
+
# Load the hyperparameters from the config file.
|
206 |
+
args = load_hyperparam(args)
|
207 |
+
# Count the number of labels.
|
208 |
+
args.labels_num = count_labels_num(args.train_path)
|
209 |
+
|
210 |
+
# Build tokenizer.
|
211 |
+
args.tokenizer = str2tokenizer[args.tokenizer](args)
|
212 |
+
set_seed(args.seed)
|
213 |
+
|
214 |
+
# Build classification model.
|
215 |
+
model = MultilabelClassifier(args)
|
216 |
+
|
217 |
+
# Load or initialize parameters.
|
218 |
+
load_or_initialize_parameters(args, model)
|
219 |
+
|
220 |
+
# Get logger.
|
221 |
+
args.logger = init_logger(args)
|
222 |
+
|
223 |
+
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
224 |
+
model = model.to(args.device)
|
225 |
+
|
226 |
+
# Training phase.
|
227 |
+
trainset = read_dataset(args, args.train_path)
|
228 |
+
instances_num = len(trainset)
|
229 |
+
batch_size = args.batch_size
|
230 |
+
|
231 |
+
args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1
|
232 |
+
|
233 |
+
args.logger.info("Batch size: {}".format(batch_size))
|
234 |
+
args.logger.info("The number of training instances: {}".format(instances_num))
|
235 |
+
optimizer, scheduler = build_optimizer(args, model)
|
236 |
+
|
237 |
+
if args.fp16:
|
238 |
+
try:
|
239 |
+
from apex import amp
|
240 |
+
except ImportError:
|
241 |
+
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
|
242 |
+
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
|
243 |
+
args.amp = amp
|
244 |
+
|
245 |
+
if torch.cuda.device_count() > 1:
|
246 |
+
args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
|
247 |
+
model = torch.nn.DataParallel(model)
|
248 |
+
args.model = model
|
249 |
+
|
250 |
+
if args.use_adv:
|
251 |
+
args.adv_method = str2adv[args.adv_type](model)
|
252 |
+
|
253 |
+
total_loss, result, best_result = 0.0, 0.0, 0.0
|
254 |
+
|
255 |
+
args.logger.info("Start training.")
|
256 |
+
|
257 |
+
for epoch in range(1, args.epochs_num + 1):
|
258 |
+
random.shuffle(trainset)
|
259 |
+
src = torch.LongTensor([example[0] for example in trainset])
|
260 |
+
tgt = torch.tensor([sample[1] for sample in trainset], dtype=torch.float)
|
261 |
+
seg = torch.LongTensor([example[2] for example in trainset])
|
262 |
+
|
263 |
+
model.train()
|
264 |
+
for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
|
265 |
+
loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
|
266 |
+
total_loss += loss.item()
|
267 |
+
if (i + 1) % args.report_steps == 0:
|
268 |
+
args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
|
269 |
+
total_loss = 0.0
|
270 |
+
|
271 |
+
result = evaluate(args, read_dataset(args, args.dev_path))
|
272 |
+
if result > best_result:
|
273 |
+
best_result = result
|
274 |
+
save_model(model, args.output_model_path)
|
275 |
+
|
276 |
+
# Evaluation phase.
|
277 |
+
if args.test_path is not None:
|
278 |
+
args.logger.info("Test set evaluation.")
|
279 |
+
if torch.cuda.device_count() > 1:
|
280 |
+
args.model.module.load_state_dict(torch.load(args.output_model_path))
|
281 |
+
else:
|
282 |
+
args.model.load_state_dict(torch.load(args.output_model_path))
|
283 |
+
evaluate(args, read_dataset(args, args.test_path))
|
284 |
+
|
285 |
+
|
286 |
+
if __name__ == "__main__":
|
287 |
+
main()
|
train/finetune/run_classifier_prompt.py
ADDED
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to use prompt for classification.
|
3 |
+
"""
|
4 |
+
import re
|
5 |
+
import sys
|
6 |
+
import os
|
7 |
+
import logging
|
8 |
+
import random
|
9 |
+
import argparse
|
10 |
+
import torch
|
11 |
+
import torch.nn as nn
|
12 |
+
|
13 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
14 |
+
sys.path.append(tencentpretrain_dir)
|
15 |
+
|
16 |
+
from finetune.run_classifier import *
|
17 |
+
from tencentpretrain.targets import *
|
18 |
+
|
19 |
+
|
20 |
+
class ClozeTest(nn.Module):
|
21 |
+
def __init__(self, args):
|
22 |
+
super(ClozeTest, self).__init__()
|
23 |
+
self.embedding = Embedding(args)
|
24 |
+
for embedding_name in args.embedding:
|
25 |
+
tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
|
26 |
+
self.embedding.update(tmp_emb, embedding_name)
|
27 |
+
self.encoder = str2encoder[args.encoder](args)
|
28 |
+
self.target = MlmTarget(args, len(args.tokenizer.vocab))
|
29 |
+
if args.tie_weights:
|
30 |
+
self.target.mlm_linear_2.weight = self.embedding.word_embedding.weight
|
31 |
+
self.answer_position = args.answer_position
|
32 |
+
self.device = args.device
|
33 |
+
|
34 |
+
def forward(self, src, tgt, seg):
|
35 |
+
emb = self.embedding(src, seg)
|
36 |
+
memory_bank = self.encoder(emb, seg)
|
37 |
+
output_mlm = self.target.act(self.target.mlm_linear_1(memory_bank))
|
38 |
+
output_mlm = self.target.layer_norm(output_mlm)
|
39 |
+
tgt_mlm = tgt.contiguous().view(-1)
|
40 |
+
if self.target.factorized_embedding_parameterization:
|
41 |
+
output_mlm = output_mlm.contiguous().view(-1, self.target.emb_size)
|
42 |
+
else:
|
43 |
+
output_mlm = output_mlm.contiguous().view(-1, self.target.hidden_size)
|
44 |
+
output_mlm = output_mlm[tgt_mlm > 0, :]
|
45 |
+
tgt_mlm = tgt_mlm[tgt_mlm > 0]
|
46 |
+
self.answer_position = self.answer_position.to(self.device).view(-1)
|
47 |
+
logits = self.target.mlm_linear_2(output_mlm)
|
48 |
+
logits = logits * self.answer_position
|
49 |
+
prob = self.target.softmax(logits)
|
50 |
+
loss = self.target.criterion(prob, tgt_mlm)
|
51 |
+
pred = prob[:, self.answer_position > 0].argmax(dim=-1)
|
52 |
+
|
53 |
+
return loss, pred, logits
|
54 |
+
|
55 |
+
|
56 |
+
def read_dataset(args, path):
    """Read a TSV classification dataset and build prompt-style cloze samples.

    Each returned sample is a tuple (src, tgt, seg):
      src -- token ids: [CLS] + template (with [ANS] replaced by [MASK]) + [SEP]
      tgt -- zeros everywhere except the answer token id at the mask position
      seg -- 1 for real tokens, 0 for padding

    Examples whose mask position falls outside ``args.seq_length`` — or whose
    template produced no [ANS] slot at all — are skipped and counted in
    ``ignore_count``.
    """
    dataset, columns = [], {}
    count, ignore_count = 0, 0
    # Loop-invariant: the pad id does not depend on the example, so compute
    # it once instead of once per input line.
    PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row: map column names to indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            mask_position = -1
            # Map the raw label to its verbalizer word, then to a vocab id.
            label = args.answer_word_dict[str(line[columns["label"]])]
            tgt_token_id = args.tokenizer.vocab[label]
            src = [args.tokenizer.vocab.get(CLS_TOKEN)]
            if "text_b" not in columns:  # Sentence classification.
                text_a = line[columns["text_a"]]
                text_a_token_id = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_a))
                # Reserve room for the template, [CLS] and trailing [SEP].
                max_length = args.seq_length - args.template_length - 2
                text_a_token_id = text_a_token_id[:max_length]
                for prompt_token in args.prompt_template:
                    if prompt_token == "[TEXT_A]":
                        src += text_a_token_id
                    elif prompt_token == "[ANS]":
                        src += [args.tokenizer.vocab.get(MASK_TOKEN)]
                        mask_position = len(src) - 1
                    else:
                        # Literal template fragment (already a list of ids).
                        src += prompt_token
            else:  # Sentence-pair classification.
                text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
                text_a_token_id = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_a))
                text_b_token_id = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b))
                # Only text_b is truncated; text_a is kept whole.
                max_length = args.seq_length - args.template_length - len(text_a_token_id) - 3
                text_b_token_id = text_b_token_id[:max_length]
                for prompt_token in args.prompt_template:
                    if prompt_token == "[TEXT_A]":
                        src += text_a_token_id
                        src += [args.tokenizer.vocab.get(SEP_TOKEN)]
                    elif prompt_token == "[ANS]":
                        src += [args.tokenizer.vocab.get(MASK_TOKEN)]
                        mask_position = len(src) - 1
                    elif prompt_token == "[TEXT_B]":
                        src += text_b_token_id
                    else:
                        src += prompt_token
            src += [args.tokenizer.vocab.get(SEP_TOKEN)]
            seg = [1] * len(src)

            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]

            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)
            tgt = [0] * len(src)
            # Ignore the sentence when the answer slot is missing or was
            # truncated out of the sequence.  (The original code only checked
            # the upper bound; with mask_position == -1 it silently wrote
            # tgt[-1], corrupting the last padded position.)
            if mask_position < 0 or mask_position >= args.seq_length:
                ignore_count += 1
                continue
            tgt[mask_position] = tgt_token_id
            count += 1
            dataset.append((src, tgt, seg))
    args.logger.info(f"read dataset, count:{count}, ignore_count:{ignore_count}")
    return dataset
|
121 |
+
|
122 |
+
|
123 |
+
def train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch):
    """Run one optimization step on a single batch and return the loss."""
    model.zero_grad()

    device = args.device
    src_batch, tgt_batch, seg_batch = (
        src_batch.to(device),
        tgt_batch.to(device),
        seg_batch.to(device),
    )

    loss = model(src_batch, tgt_batch, seg_batch)[0]
    # DataParallel returns one loss per GPU; reduce them to a scalar.
    if torch.cuda.device_count() > 1:
        loss = torch.mean(loss)

    if args.fp16:
        # apex mixed precision: backprop through the scaled loss.
        with args.amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    optimizer.step()
    scheduler.step()

    return loss
|
144 |
+
|
145 |
+
|
146 |
+
def process_prompt_template(args):
    """Parse the prompt template and verbalizer from the prompt JSON file.

    Populates on ``args``:
      prompt_template      -- list mixing placeholder strings ("[TEXT_A]",
                              "[TEXT_B]", "[ANS]") and token-id lists for
                              literal template fragments
      answer_word_dict     -- label -> answer (verbalizer) word
      answer_word_dict_inv -- answer word -> label
      template_length      -- token budget consumed by the template itself
                              (literal tokens plus one slot for [ANS])
    """
    placeholders = ("[TEXT_B]", "[TEXT_A]", "[ANS]")
    with open(args.prompt_path, "r", encoding="utf-8") as f_json:
        temp_dict = json.load(f_json)
    prompt_entry = temp_dict[args.prompt_id]
    parts = re.split(r"(\[TEXT_B\]|\[TEXT_A\]|\[ANS\])", prompt_entry["template"])

    args.prompt_template = []
    template_length = 0
    for part in parts:
        if not part:
            continue
        if part == "[ANS]":
            # The [MASK] token occupies one position of the budget.
            args.prompt_template.append(part)
            template_length += 1
        elif part in placeholders:
            # [TEXT_A]/[TEXT_B] are filled in per example; no fixed cost.
            args.prompt_template.append(part)
        else:
            # Literal fragment: pre-tokenize once.
            ids = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(part))
            args.prompt_template.append(ids)
            template_length += len(ids)
    print(args.prompt_template)
    args.answer_word_dict = prompt_entry["answer_words"]
    args.answer_word_dict_inv = {v: k for k, v in args.answer_word_dict.items()}
    args.template_length = template_length
|
168 |
+
|
169 |
+
|
170 |
+
def evaluate(args, dataset):
    """Evaluate the cloze model on ``dataset``; return (accuracy, confusion).

    The model's ``pred`` indexes into the restricted answer sub-vocabulary,
    so it is mapped back to full-vocab token ids via ``labels_inv`` before
    comparison with the gold answer token ids.
    """
    src = torch.LongTensor([sample[0] for sample in dataset])
    tgt = torch.LongTensor([sample[1] for sample in dataset])
    seg = torch.LongTensor([sample[2] for sample in dataset])

    batch_size = args.batch_size

    correct = 0
    # labels: answer-word vocab id -> dense index (built in sorted order);
    # labels_inv is the reverse mapping.
    labels = {}
    for k in sorted([args.tokenizer.vocab[k] for k in args.answer_word_dict_inv]):
        labels[k] = len(labels)
    labels_inv = {v: k for k, v in labels.items()}
    confusion = torch.zeros(len(labels), len(labels), dtype=torch.long)
    args.model.eval()

    for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
        src_batch = src_batch.to(args.device)
        tgt_batch = tgt_batch.to(args.device)
        seg_batch = seg_batch.to(args.device)

        with torch.no_grad():
            _, pred, _ = args.model(src_batch, tgt_batch, seg_batch)
        # gold: the answer token id at each masked position.
        gold = tgt_batch[tgt_batch > 0]
        for j in range(pred.size()[0]):
            # Map the sub-vocabulary index back to the full-vocab token id.
            # NOTE(review): relies on the model's argmax over answer columns
            # matching the sorted order used to build ``labels`` — confirm.
            pred[j] = labels_inv[int(pred[j])]
            confusion[labels[int(pred[j])], labels[int(gold[j])]] += 1
        correct += torch.sum(pred == gold).item()

    args.logger.debug("Confusion matrix:")
    args.logger.debug(confusion)
    args.logger.debug("Report precision, recall, and f1:")

    eps = 1e-9
    for i in range(confusion.size()[0]):
        # Rows are predictions, columns are gold labels.
        p = confusion[i, i].item() / (confusion[i, :].sum().item() + eps)
        r = confusion[i, i].item() / (confusion[:, i].sum().item() + eps)
        f1 = 2 * p * r / (p + r + eps)
        args.logger.debug("Label {}: {:.3f}, {:.3f}, {:.3f}".format(i, p, r, f1))

    args.logger.info("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct / len(dataset), correct, len(dataset)))
    return correct / len(dataset), confusion
|
211 |
+
|
212 |
+
|
213 |
+
def main():
    """Entry point: parse args, build the cloze model, train, and evaluate."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    tokenizer_opts(parser)
    finetune_opts(parser)

    parser.add_argument("--prompt_id", type=str, default="chnsenticorp_char")
    parser.add_argument("--prompt_path", type=str, default="models/prompts.json")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    args.tokenizer = str2tokenizer[args.tokenizer](args)
    set_seed(args.seed)

    # Parse the prompt template / verbalizer (fills args.prompt_template,
    # args.answer_word_dict, args.template_length, ...).
    process_prompt_template(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Build the 0/1 vocab mask marking valid answer tokens.
    answer_position = [0] * len(args.tokenizer.vocab)
    for answer in args.answer_word_dict_inv:
        answer_position[int(args.tokenizer.vocab[answer])] = 1
    args.answer_position = torch.LongTensor(answer_position)
    # Build classification model.
    model = ClozeTest(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)

    instances_num = len(trainset)
    batch_size = args.batch_size

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))
    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")
    for epoch in range(1, args.epochs_num + 1):
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt = torch.LongTensor([example[1] for example in trainset])
        seg = torch.LongTensor([example[2] for example in trainset])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg, None)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep the checkpoint with the best dev-set accuracy.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)

    # Evaluation phase.
    # With zero epochs, evaluate the pretrained checkpoint directly.
    if args.epochs_num == 0:
        args.output_model_path = args.pretrained_model_path
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path), strict=False)
        else:
            args.model.load_state_dict(torch.load(args.output_model_path), strict=False)
        evaluate(args, read_dataset(args, args.test_path))
|
305 |
+
|
306 |
+
|
307 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
train/finetune/run_classifier_siamese.py
ADDED
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for classification with siamese network.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import collections
|
9 |
+
import torch
|
10 |
+
import torch.nn as nn
|
11 |
+
|
12 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
13 |
+
sys.path.append(tencentpretrain_dir)
|
14 |
+
|
15 |
+
from tencentpretrain.embeddings import *
|
16 |
+
from tencentpretrain.encoders import *
|
17 |
+
from tencentpretrain.targets import *
|
18 |
+
from tencentpretrain.utils.vocab import Vocab
|
19 |
+
from tencentpretrain.utils.constants import *
|
20 |
+
from tencentpretrain.utils import *
|
21 |
+
from tencentpretrain.utils.optimizers import *
|
22 |
+
from tencentpretrain.utils.config import load_hyperparam
|
23 |
+
from tencentpretrain.utils.seed import set_seed
|
24 |
+
from tencentpretrain.utils.logging import init_logger
|
25 |
+
from tencentpretrain.utils.misc import pooling
|
26 |
+
from tencentpretrain.model_saver import save_model
|
27 |
+
from tencentpretrain.opts import finetune_opts, tokenizer_opts
|
28 |
+
from finetune.run_classifier import count_labels_num, build_optimizer
|
29 |
+
|
30 |
+
|
31 |
+
class SiameseClassifier(nn.Module):
    """Siamese sentence-pair classifier (SBERT-style).

    Each sentence is encoded by one stream of a DualEncoder; the pooled
    vectors u, v are combined as [u; v; |u - v|; u * v] and fed to a
    linear classifier.
    """

    def __init__(self, args):
        super(SiameseClassifier, self).__init__()
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(tmp_emb, embedding_name)
        self.encoder = DualEncoder(args)

        # 4x hidden size: concatenation of u, v, |u - v|, u * v.
        self.classifier = nn.Linear(4 * args.stream_0["hidden_size"], args.labels_num)
        self.pooling_type = args.pooling

    def forward(self, src, tgt, seg):
        """
        Args:
            src: pair of [batch_size x seq_length] token-id tensors
            tgt: [batch_size] labels, or None at inference time
            seg: pair of [batch_size x seq_length] segment tensors
        """
        # Embedding.
        emb = self.embedding(src, seg)
        # Encoder.
        output = self.encoder(emb, seg)
        # Target.
        features_0, features_1 = output
        # Pool each stream's token representations into a sentence vector.
        features_0 = pooling(features_0, seg[0], self.pooling_type)
        features_1 = pooling(features_1, seg[1], self.pooling_type)

        vectors_concat = []

        # concatenation
        vectors_concat.append(features_0)
        vectors_concat.append(features_1)
        # difference:
        vectors_concat.append(torch.abs(features_0 - features_1))
        # multiplication:
        vectors_concat.append(features_0 * features_1)

        features = torch.cat(vectors_concat, 1)

        logits = self.classifier(features)

        if tgt is not None:
            loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1))
            return loss, logits
        else:
            return None, logits
|
78 |
+
|
79 |
+
|
80 |
+
def load_or_initialize_parameters(args, model):
    """Load pretrained weights into the siamese model, or randomly init.

    A plain (single-stream) checkpoint has its embedding/encoder weights
    duplicated into both siamese streams; a checkpoint that already
    contains siamese keys ("embedding_0" etc.) is loaded as-is.
    """
    if args.pretrained_model_path is None:
        # Initialize with normal distribution (layer-norm params excluded).
        for name, param in model.named_parameters():
            if "gamma" not in name and "beta" not in name:
                param.data.normal_(0, 0.02)
        return

    # Initialize with pretrained model.
    state_dict = torch.load(args.pretrained_model_path, map_location="cpu")
    has_siamese_keys = any("embedding_0" in key for key in state_dict)
    if has_siamese_keys:
        model.load_state_dict(state_dict, strict=False)
        return

    # Duplicate single-stream weights into both streams.
    siamese_state_dict = collections.OrderedDict()
    for key, value in state_dict.items():
        prefix, _, suffix = key.partition(".")
        if prefix == "embedding":
            siamese_state_dict["embedding.embedding_0." + suffix] = value
            siamese_state_dict["embedding.embedding_1." + suffix] = value
        if prefix == "encoder":
            siamese_state_dict["encoder.encoder_0." + suffix] = value
            siamese_state_dict["encoder.encoder_1." + suffix] = value
    model.load_state_dict(siamese_state_dict, strict=False)
|
106 |
+
|
107 |
+
|
108 |
+
def batch_loader(batch_size, src, tgt, seg):
    """Yield mini-batches of ((src_a, src_b), tgt, (seg_a, seg_b)).

    Full batches come first; a final, smaller batch is yielded when the
    instance count is not a multiple of ``batch_size``.
    """
    total = tgt.size()[0]
    src_a, src_b = src
    seg_a, seg_b = seg
    full_batches = total // batch_size
    for idx in range(full_batches):
        lo, hi = idx * batch_size, (idx + 1) * batch_size
        yield (src_a[lo:hi, :], src_b[lo:hi, :]), tgt[lo:hi], (seg_a[lo:hi, :], seg_b[lo:hi, :])
    tail = full_batches * batch_size
    if total > tail:
        yield (src_a[tail:, :], src_b[tail:, :]), tgt[tail:], (seg_a[tail:, :], seg_b[tail:, :])
|
126 |
+
|
127 |
+
|
128 |
+
def read_dataset(args, path):
    """Read a sentence-pair TSV file into siamese training samples.

    Returns a list of ((src_a, src_b), tgt, (seg_a, seg_b)) tuples where
    each sequence is [CLS] + tokens + [SEP], truncated and right-padded
    to ``args.seq_length``.
    """

    def pad_or_truncate(ids, seg):
        # Clip to seq_length, then right-pad with PAD_ID / segment 0.
        ids, seg = ids[: args.seq_length], seg[: args.seq_length]
        while len(ids) < args.seq_length:
            ids.append(PAD_ID)
            seg.append(0)
        return ids, seg

    dataset, columns = [], {}
    # Loop-invariant: the pad id does not depend on the example, so compute
    # it once instead of once per input line.
    PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row: map column names to indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            tgt = int(line[columns["label"]])

            text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
            src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
            src_b = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
            src_a, seg_a = pad_or_truncate(src_a, [1] * len(src_a))
            src_b, seg_b = pad_or_truncate(src_b, [1] * len(src_b))

            dataset.append(((src_a, src_b), tgt, (seg_a, seg_b)))

    return dataset
|
163 |
+
|
164 |
+
|
165 |
+
def train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch):
    """Run one optimization step on a siamese batch and return the loss."""
    model.zero_grad()

    device = args.device
    src_a_batch, src_b_batch = (t.to(device) for t in src_batch)
    seg_a_batch, seg_b_batch = (t.to(device) for t in seg_batch)
    tgt_batch = tgt_batch.to(device)

    loss = model((src_a_batch, src_b_batch), tgt_batch, (seg_a_batch, seg_b_batch))[0]

    # DataParallel returns one loss per GPU; reduce them to a scalar.
    if torch.cuda.device_count() > 1:
        loss = torch.mean(loss)

    if args.fp16:
        # apex mixed precision: backprop through the scaled loss.
        with args.amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    optimizer.step()
    scheduler.step()

    return loss
|
194 |
+
|
195 |
+
|
196 |
+
def evaluate(args, dataset):
    """Evaluate the siamese classifier on ``dataset``.

    Returns (accuracy, confusion_matrix); rows of the confusion matrix
    are predictions, columns are gold labels.
    """
    src_a = torch.LongTensor([sample[0][0] for sample in dataset])
    src_b = torch.LongTensor([sample[0][1] for sample in dataset])
    tgt = torch.LongTensor([sample[1] for sample in dataset])
    seg_a = torch.LongTensor([sample[2][0] for sample in dataset])
    seg_b = torch.LongTensor([sample[2][1] for sample in dataset])

    batch_size = args.batch_size

    correct = 0
    # Confusion matrix.
    confusion = torch.zeros(args.labels_num, args.labels_num, dtype=torch.long)

    args.model.eval()

    loader = batch_loader(batch_size, (src_a, src_b), tgt, (seg_a, seg_b))
    for src_batch, tgt_batch, seg_batch in loader:
        src_pair = tuple(t.to(args.device) for t in src_batch)
        seg_pair = tuple(t.to(args.device) for t in seg_batch)
        gold = tgt_batch.to(args.device)

        with torch.no_grad():
            # tgt=None: inference path returns (None, logits).
            _, logits = args.model(src_pair, None, seg_pair)
        pred = torch.argmax(nn.Softmax(dim=1)(logits), dim=1)
        for p, g in zip(pred, gold):
            confusion[p, g] += 1
        correct += torch.sum(pred == gold).item()

    args.logger.debug("Confusion matrix:")
    args.logger.debug(confusion)
    args.logger.debug("Report precision, recall, and f1:")

    eps = 1e-9
    for label in range(confusion.size()[0]):
        tp = confusion[label, label].item()
        p = tp / (confusion[label, :].sum().item() + eps)
        r = tp / (confusion[:, label].sum().item() + eps)
        f1 = 2 * p * r / (p + r + eps)
        args.logger.debug("Label {}: {:.3f}, {:.3f}, {:.3f}".format(label, p, r, f1))

    args.logger.info("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct / len(dataset), correct, len(dataset)))
    return correct / len(dataset), confusion
|
245 |
+
|
246 |
+
|
247 |
+
def main():
    """Entry point: parse args, build the siamese model, train, evaluate."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    tokenizer_opts(parser)

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Count the number of labels.
    args.labels_num = count_labels_num(args.train_path)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model.
    model = SiameseClassifier(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        random.shuffle(trainset)
        # Unpack the ((src_a, src_b), tgt, (seg_a, seg_b)) samples.
        src_a = torch.LongTensor([example[0][0] for example in trainset])
        src_b = torch.LongTensor([example[0][1] for example in trainset])
        tgt = torch.LongTensor([example[1] for example in trainset])
        seg_a = torch.LongTensor([example[2][0] for example in trainset])
        seg_b = torch.LongTensor([example[2][1] for example in trainset])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch) in enumerate(batch_loader(batch_size, (src_a, src_b), tgt, (seg_a, seg_b))):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep the checkpoint with the best dev-set accuracy.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)

    # Evaluation phase.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))
|
337 |
+
|
338 |
+
|
339 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
train/finetune/run_cmrc.py
ADDED
@@ -0,0 +1,447 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for Chinese machine reading comprehension.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import re
|
7 |
+
import argparse
|
8 |
+
import json
|
9 |
+
import random
|
10 |
+
import torch
|
11 |
+
import torch.nn as nn
|
12 |
+
|
13 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
14 |
+
sys.path.append(tencentpretrain_dir)
|
15 |
+
|
16 |
+
from tencentpretrain.embeddings import *
|
17 |
+
from tencentpretrain.encoders import *
|
18 |
+
from tencentpretrain.utils.constants import *
|
19 |
+
from tencentpretrain.utils.tokenizers import *
|
20 |
+
from tencentpretrain.utils.optimizers import *
|
21 |
+
from tencentpretrain.utils.config import load_hyperparam
|
22 |
+
from tencentpretrain.utils.seed import set_seed
|
23 |
+
from tencentpretrain.utils.logging import init_logger
|
24 |
+
from tencentpretrain.model_saver import save_model
|
25 |
+
from tencentpretrain.opts import finetune_opts
|
26 |
+
from finetune.run_classifier import build_optimizer, load_or_initialize_parameters
|
27 |
+
|
28 |
+
|
29 |
+
class MachineReadingComprehension(nn.Module):
    """Extractive machine reading comprehension model.

    A linear layer on top of the encoder produces two logits per token
    (answer-span start and end); the two NLL losses are averaged.
    """

    def __init__(self, args):
        super(MachineReadingComprehension, self).__init__()
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(tmp_emb, embedding_name)
        self.encoder = str2encoder[args.encoder](args)
        # Two outputs per token: start logit and end logit.
        self.output_layer = nn.Linear(args.hidden_size, 2)

    def forward(self, src, seg, start_position, end_position):
        # Embedding.
        emb = self.embedding(src, seg)
        # Encoder.
        output = self.encoder(emb, seg)
        # Target.
        logits = self.output_layer(output)

        # Split the last dim into per-token start / end logits.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)
        start_loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(start_logits), start_position)
        end_loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(end_logits), end_position)
        # Average of the start- and end-position losses.
        loss = (start_loss + end_loss) / 2

        return loss, start_logits, end_logits
|
54 |
+
|
55 |
+
|
56 |
+
def read_examples(path):
    """Load SQuAD-style JSON and flatten it into example tuples.

    Each tuple is (context, question, question_id, start_positions,
    end_positions, answer_texts); positions are character offsets and
    end positions are inclusive.
    """
    examples = []
    with open(path, mode="r", encoding="utf-8") as f:
        data = json.load(f)["data"]
    for article in data:
        for paragraph in article["paragraphs"]:
            context = paragraph["context"]
            for qa in paragraph["qas"]:
                texts, starts, ends = [], [], []
                for answer in qa["answers"]:
                    begin = answer["answer_start"]
                    texts.append(answer["text"])
                    starts.append(begin)
                    # Inclusive end offset.
                    ends.append(begin + len(answer["text"]) - 1)
                examples.append((context, qa["question"], qa["id"], starts, ends, texts))
    return examples
|
73 |
+
|
74 |
+
|
75 |
+
def convert_examples_to_dataset(args, examples):
    """Convert examples into model-ready instances.

    Long contexts are split into overlapping spans (sliding window with
    step args.doc_stride), so one example can yield several instances.
    Only the first gold answer of each question is used as the target.
    NOTE(review): positions are character offsets, which assumes the
    tokenizer emits one token per character (CharTokenizer) — confirm.
    """
    # Converts a list of examples into a dataset that can be directly given as input to a model.
    dataset = []
    print("The number of questions in the dataset:{}".format(len(examples)))
    for i in range(len(examples)):
        context = examples[i][0]
        question = examples[i][1]
        question_id = examples[i][2]
        # Only consider the first answer.
        start_position_absolute = examples[i][3][0]
        end_position_absolute = examples[i][4][0]
        answers = examples[i][5]
        # Room left for context tokens: [CLS], two [SEP]s, and the question
        # take len(question) + 3 slots of args.seq_length.
        max_context_length = args.seq_length - len(question) - 3
        # Divide the context into multiple spans.
        doc_spans = []
        start_offset = 0
        while start_offset < len(context):
            length = len(context) - start_offset
            if length > max_context_length:
                length = max_context_length
            doc_spans.append((start_offset, length))
            if start_offset + length == len(context):
                break
            start_offset += min(length, args.doc_stride)

        for doc_span_index, doc_span in enumerate(doc_spans):
            start_offset = doc_span[0]
            span_context = context[start_offset : start_offset + doc_span[1]]
            # Convert absolute position to relative position.
            # Offset of len(question) + 2 accounts for "[CLS] question [SEP]".
            start_position = start_position_absolute - start_offset + len(question) + 2
            end_position = end_position_absolute - start_offset + len(question) + 2

            # If span does not contain the complete answer, we use it for data augmentation.
            if start_position < len(question) + 2:
                start_position = len(question) + 2
            if end_position > doc_span[1] + len(question) + 1:
                end_position = doc_span[1] + len(question) + 1
            # Answer entirely outside this span: mark as unanswerable (0, 0).
            if start_position > doc_span[1] + len(question) + 1 or end_position < len(question) + 2:
                start_position, end_position = 0, 0

            src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(question) + [SEP_TOKEN])
            src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(span_context) + [SEP_TOKEN])
            src = src_a + src_b
            # Segment ids: 1 for the question part, 2 for the context part.
            seg = [1] * len(src_a) + [2] * len(src_b)
            PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)

            # Extra bookkeeping fields (question_id, question length, span
            # index, span offset) are needed later to merge span predictions.
            dataset.append((src, seg, start_position, end_position, answers, question_id, len(question), doc_span_index, start_offset))
    return dataset
|
126 |
+
|
127 |
+
|
128 |
+
def read_dataset(args, path):
    """Read examples from *path* and convert them to model inputs.

    Returns (dataset, examples) so evaluation can map predictions back
    to the raw examples.
    """
    examples = read_examples(path)
    return convert_examples_to_dataset(args, examples), examples
|
132 |
+
|
133 |
+
|
134 |
+
def batch_loader(batch_size, src, seg, start_position, end_position):
    """Yield mini-batches of the four tensors; a final partial batch is kept."""
    instances_num = src.size()[0]
    full_batches = instances_num // batch_size
    for i in range(full_batches):
        lo, hi = i * batch_size, (i + 1) * batch_size
        yield src[lo:hi, :], seg[lo:hi, :], start_position[lo:hi], end_position[lo:hi]
    tail = full_batches * batch_size
    if instances_num > tail:
        # Leftover instances that do not fill a whole batch.
        yield src[tail:, :], seg[tail:, :], start_position[tail:], end_position[tail:]
|
148 |
+
|
149 |
+
|
150 |
+
def train(args, model, optimizer, scheduler, src_batch, seg_batch, start_position_batch, end_position_batch):
    """Run one optimization step on a single batch and return the loss tensor."""
    model.zero_grad()

    src_batch = src_batch.to(args.device)
    seg_batch = seg_batch.to(args.device)
    start_position_batch = start_position_batch.to(args.device)
    end_position_batch = end_position_batch.to(args.device)

    loss, _, _ = model(src_batch, seg_batch, start_position_batch, end_position_batch)
    # Under DataParallel the model returns one loss per GPU; reduce to scalar.
    if torch.cuda.device_count() > 1:
        loss = torch.mean(loss)

    if args.fp16:
        # NOTE(review): `amp` is imported inside main() (function-local
        # binding), so this global reference may raise NameError when fp16
        # is enabled — confirm how `amp` reaches this scope.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    optimizer.step()
    scheduler.step()

    return loss
|
172 |
+
|
173 |
+
|
174 |
+
# Evaluation script from CMRC2018.
|
175 |
+
# We modify the tokenizer.
|
176 |
+
# Evaluation script from CMRC2018.
# We modify the tokenizer.
def mixed_segmentation(in_str, rm_punc=False):
    """Segment *in_str* into evaluation tokens (CMRC2018 style).

    CJK characters and listed punctuation become single tokens; runs of
    other characters (Latin words, digits, ...) are buffered and then split
    into individual characters.  When *rm_punc* is True, punctuation from
    the special-character list is dropped entirely.
    """
    # BUG FIX: the lowered/stripped string used to be assigned to an unused
    # name ("n_str") while the loop iterated over the raw input, so
    # .lower().strip() never took effect.  Rebind in_str as intended.
    in_str = str(in_str).lower().strip()
    segs_out = []
    temp_str = ""
    sp_char = ['-',':','_','*','^','/','\\','~','`','+','=',
               ',','。',':','?','!','“','”',';','’','《','》','……','·','、',
               '「','」','(',')','-','~','『','』']
    for char in in_str:
        if rm_punc and char in sp_char:
            continue
        if re.search(r'[\u4e00-\u9fa5]', char) or char in sp_char:
            # CJK char or punctuation: flush the buffered run first.
            if temp_str != "":
                segs_out.extend(list(temp_str))
                temp_str = ""
            segs_out.append(char)
        else:
            temp_str += char

    # Flush any trailing non-CJK buffer.
    if temp_str != "":
        segs_out.extend(list(temp_str))

    return segs_out
|
204 |
+
|
205 |
+
|
206 |
+
def find_lcs(s1, s2):
    """Longest common (contiguous) substring of sequences *s1* and *s2*.

    Returns (matching_slice_of_s1, length); no match yields (s1[0:0], 0).
    """
    # Classic DP: table[i+1][j+1] = length of the common suffix of
    # s1[:i+1] and s2[:j+1].
    table = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    best_len = 0
    best_end = 0  # index just past the best match in s1
    for i, c1 in enumerate(s1):
        for j, c2 in enumerate(s2):
            if c1 == c2:
                table[i + 1][j + 1] = table[i][j] + 1
                if table[i + 1][j + 1] > best_len:
                    best_len = table[i + 1][j + 1]
                    best_end = i + 1
    return s1[best_end - best_len:best_end], best_len
|
218 |
+
|
219 |
+
|
220 |
+
def remove_punctuation(in_str):
    """Return *in_str* lowercased, stripped, with CMRC punctuation removed."""
    in_str = str(in_str).lower().strip()
    sp_char = ['-',':','_','*','^','/','\\','~','`','+','=',
               ',','。',':','?','!','“','”',';','’','《','》','……','·','、',
               '「','」','(',')','-','~','『','』']
    # Keep every character that is not in the special-character list.
    return ''.join(char for char in in_str if char not in sp_char)
|
233 |
+
|
234 |
+
|
235 |
+
def calc_f1_score(answers, prediction):
    """Max token-level F1 between *prediction* and any gold answer."""
    f1_scores = []
    for ans in answers:
        gold_tokens = mixed_segmentation(ans, rm_punc=True)
        pred_tokens = mixed_segmentation(prediction, rm_punc=True)
        _, overlap = find_lcs(gold_tokens, pred_tokens)
        if overlap == 0:
            f1_scores.append(0)
            continue
        precision = 1.0 * overlap / len(pred_tokens)
        recall = 1.0 * overlap / len(gold_tokens)
        f1_scores.append((2 * precision * recall) / (precision + recall))
    return max(f1_scores)
|
249 |
+
|
250 |
+
|
251 |
+
def calc_em_score(answers, prediction):
    """Return 1 if the normalized prediction matches any gold answer, else 0."""
    pred_norm = remove_punctuation(prediction)
    for ans in answers:
        if remove_punctuation(ans) == pred_norm:
            return 1
    return 0
|
260 |
+
|
261 |
+
|
262 |
+
def get_answers(dataset, start_prob_all, end_prob_all):
    """Pick the best answer span per question across its document spans.

    Instances sharing a question_id are assumed contiguous in *dataset*.
    For each question the span with the highest start+end probability wins.
    Returns one (span_index, start, end, score) tuple per question, with
    start/end converted back to absolute positions in the original context.
    """
    previous_question_id = -1
    pred_answers = []
    # For each predicted answer, we store its span index, start position, end position, and score.
    current_answer = (-1, -1, -1, -100.0)
    for i in range(len(dataset)):
        question_id = dataset[i][5]
        question_length = dataset[i][6]
        span_index = dataset[i][7]
        start_offset = dataset[i][8]

        start_scores, end_scores = start_prob_all[i], end_prob_all[i]

        # Search only past the "[CLS] question [SEP]" prefix
        # (question_length + 2 positions); end is constrained to >= start.
        start_pred = torch.argmax(start_scores[question_length + 2 :], dim=0) + question_length + 2
        end_pred = start_pred + torch.argmax(end_scores[start_pred:], dim=0)
        score = start_scores[start_pred] + end_scores[end_pred]

        # Map span-relative positions back to absolute context positions.
        start_pred_absolute = start_pred + start_offset - question_length - 2
        end_pred_absolute = end_pred + start_offset - question_length - 2

        if question_id == previous_question_id:
            # Same question: keep the highest-scoring span so far.
            if score > current_answer[3]:
                current_answer = (span_index, start_pred_absolute, end_pred_absolute, score)
        else:
            # New question: emit the previous question's best answer.
            if i > 0:
                pred_answers.append(current_answer)
            previous_question_id = question_id
            current_answer = (span_index, start_pred_absolute, end_pred_absolute, score)
    # Emit the final question's answer.
    pred_answers.append(current_answer)
    return pred_answers
|
292 |
+
|
293 |
+
|
294 |
+
# Evaluation function.
|
295 |
+
def evaluate(args, dataset, examples):
    """Evaluate on *dataset*, report char-level F1/EM (CMRC2018 style).

    Returns the average of the F1 and EM percentages.
    """
    src = torch.LongTensor([sample[0] for sample in dataset])
    seg = torch.LongTensor([sample[1] for sample in dataset])
    start_position = torch.LongTensor([sample[2] for sample in dataset])
    end_position = torch.LongTensor([sample[3] for sample in dataset])

    batch_size = args.batch_size
    instances_num = src.size()[0]

    args.model.eval()
    start_prob_all, end_prob_all = [], []

    for i, (src_batch, seg_batch, start_position_batch, end_position_batch) in enumerate(batch_loader(batch_size, src, seg, start_position, end_position)):
        src_batch = src_batch.to(args.device)
        seg_batch = seg_batch.to(args.device)
        start_position_batch = start_position_batch.to(args.device)
        end_position_batch = end_position_batch.to(args.device)

        with torch.no_grad():
            loss, start_logits, end_logits = args.model(src_batch, seg_batch, start_position_batch, end_position_batch)

        start_prob = nn.Softmax(dim=1)(start_logits)
        end_prob = nn.Softmax(dim=1)(end_logits)

        # Collect per-instance probability rows for answer extraction below.
        for j in range(start_prob.size()[0]):
            start_prob_all.append(start_prob[j])
            end_prob_all.append(end_prob[j])

    pred_answers = get_answers(dataset, start_prob_all, end_prob_all)

    f1, em = 0, 0
    total_count, skip_count = len(examples), 0
    for i in range(len(examples)):
        answers = examples[i][5]
        start_pred_pos = pred_answers[i][1]
        end_pred_pos = pred_answers[i][2]

        # Degenerate span (end at or before start) is skipped but still
        # counts toward total_count, i.e. it is scored as 0.
        if end_pred_pos <= start_pred_pos:
            skip_count += 1
            continue

        prediction = examples[i][0][start_pred_pos: end_pred_pos + 1]

        f1 += calc_f1_score(answers, prediction)
        em += calc_em_score(answers, prediction)

    f1_score = 100.0 * f1 / total_count
    em_score = 100.0 * em / total_count
    avg = (f1_score + em_score) * 0.5
    args.logger.info("Avg: {:.4f},F1:{:.4f},EM:{:.4f},Total:{},Skip:{}".format(avg, f1_score, em_score, total_count, skip_count))
    return avg
|
346 |
+
|
347 |
+
|
348 |
+
def main():
    """Fine-tune and evaluate a machine reading comprehension model."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    parser.add_argument("--vocab_path", default=None, type=str,
                        help="Path of the vocabulary file.")
    parser.add_argument("--spm_model_path", default=None, type=str,
                        help="Path of the sentence piece model.")
    parser.add_argument("--doc_stride", default=128, type=int,
                        help="When splitting up a long document into chunks, how much stride to take between chunks.")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Build tokenizer (character level, so answer character offsets align
    # with token positions).  FIX: this used to be constructed a second
    # time further down; the redundant duplicate has been removed.
    args.tokenizer = CharTokenizer(args)

    # Build machine reading comprehension model.
    model = MachineReadingComprehension(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    batch_size = args.batch_size
    args.logger.info("Batch size: {}".format(batch_size))
    trainset, _ = read_dataset(args, args.train_path)
    instances_num = len(trainset)

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    total_loss = 0.0
    result = 0.0
    best_result = 0.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        random.shuffle(trainset)
        src = torch.LongTensor([sample[0] for sample in trainset])
        seg = torch.LongTensor([sample[1] for sample in trainset])
        start_position = torch.LongTensor([sample[2] for sample in trainset])
        end_position = torch.LongTensor([sample[3] for sample in trainset])

        model.train()

        for i, (src_batch, seg_batch, start_position_batch, end_position_batch) in enumerate(batch_loader(batch_size, src, seg, start_position, end_position)):
            loss = train(args, model, optimizer, scheduler, src_batch, seg_batch, start_position_batch, end_position_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep the checkpoint with the best dev-set score.
        result = evaluate(args, *read_dataset(args, args.dev_path))
        if result > best_result:
            best_result = result
            save_model(model, args.output_model_path)

    # Evaluation phase.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, *read_dataset(args, args.test_path))


if __name__ == "__main__":
    main()
|
train/finetune/run_dbqa.py
ADDED
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example of wrapping TencentPretrain for document-based question answering.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
|
10 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
11 |
+
sys.path.append(tencentpretrain_dir)
|
12 |
+
|
13 |
+
from tencentpretrain.utils.constants import *
|
14 |
+
from tencentpretrain.utils import *
|
15 |
+
from tencentpretrain.utils.optimizers import *
|
16 |
+
from tencentpretrain.utils.config import load_hyperparam
|
17 |
+
from tencentpretrain.utils.seed import set_seed
|
18 |
+
from tencentpretrain.utils.logging import init_logger
|
19 |
+
from tencentpretrain.model_saver import save_model
|
20 |
+
from tencentpretrain.opts import finetune_opts, tokenizer_opts, adv_opts
|
21 |
+
from finetune.run_classifier import Classifier, count_labels_num, build_optimizer, batch_loader, train_model, load_or_initialize_parameters
|
22 |
+
|
23 |
+
|
24 |
+
def read_dataset(args, path):
    """Read a TSV with qid/label/text_a/text_b columns into model inputs.

    Each element is (token ids, label, segment ids, qid), truncated and
    padded to args.seq_length.
    """
    dataset, columns = [], {}
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            fields = line.rstrip("\r\n").split("\t")
            if line_id == 0:
                # Header row: map column names to indices.
                columns = {name: idx for idx, name in enumerate(fields)}
                continue
            qid = int(fields[columns["qid"]])
            tgt = int(fields[columns["label"]])
            text_a = fields[columns["text_a"]]
            text_b = fields[columns["text_b"]]
            src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
            src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
            src = src_a + src_b
            # Segment ids: 1 for the question part, 2 for the candidate sentence.
            seg = [1] * len(src_a) + [2] * len(src_b)

            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]
            pad_id = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
            if len(src) < args.seq_length:
                padding = args.seq_length - len(src)
                src = src + [pad_id] * padding
                seg = seg + [0] * padding
            dataset.append((src, tgt, seg, qid))

    return dataset
|
51 |
+
|
52 |
+
|
53 |
+
def gen_dataset_groupby_qid(dataset, logits_all):
    """Group per-sentence scores by question id for MRR computation.

    Assumes instances with the same qid are contiguous in *dataset*.
    Returns a list of (qid, correct_answer_orders, scores), where
    correct_answer_orders lists the positions (within the question's
    group) of sentences labeled 1, and scores are the positive-class
    logits for every sentence of the question's document.
    """
    dataset_groupby_qid, correct_answer_orders, scores = [], [], []
    for i in range(len(dataset)):
        label = dataset[i][1]
        if i == 0:
            # First instance: open the first group.
            qid = dataset[i][3]
            # Order of the current sentence in the document.
            current_order = 0
            scores.append(float(logits_all[i][1].item()))
            if label == 1:
                # Occasionally, more than one sentences in a document contain answers.
                correct_answer_orders.append(current_order)
            current_order += 1
            continue
        if qid == dataset[i][3]:
            # Same question: keep accumulating its sentences.
            scores.append(float(logits_all[i][1].item()))
            if label == 1:
                correct_answer_orders.append(current_order)
            current_order += 1
        else:
            # For each question, we record which sentences contain answers
            # and the scores of all sentences in the document.
            dataset_groupby_qid.append((qid, correct_answer_orders, scores))
            correct_answer_orders, scores, current_order = [], [], 0
            qid = dataset[i][3]
            scores.append(float(logits_all[i][1].item()))
            if label == 1:
                correct_answer_orders.append(current_order)
            current_order += 1
    # Flush the final group.
    dataset_groupby_qid.append((qid, correct_answer_orders, scores))
    return dataset_groupby_qid
|
84 |
+
|
85 |
+
|
86 |
+
def evaluate(args, dataset):
    """Evaluate document-based QA with Mean Reciprocal Rank (MRR)."""
    src = torch.LongTensor([sample[0] for sample in dataset])
    tgt = torch.LongTensor([sample[1] for sample in dataset])
    seg = torch.LongTensor([sample[2] for sample in dataset])

    batch_size = args.batch_size
    instances_num = src.size()[0]

    args.model.eval()

    # NOTE(review): batch_loader is imported from run_classifier and is
    # unpacked as a 4-tuple (last element ignored); also, logits_all is
    # only bound inside this loop, so an empty dataset would raise a
    # NameError below — confirm callers never pass an empty dataset.
    for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
        src_batch = src_batch.to(args.device)
        tgt_batch = tgt_batch.to(args.device)
        seg_batch = seg_batch.to(args.device)
        with torch.no_grad():
            loss, logits = args.model(src_batch, tgt_batch, seg_batch)
        # Accumulate logits for all instances across batches.
        if i == 0:
            logits_all = logits
        if i >= 1:
            logits_all = torch.cat((logits_all, logits), 0)

    # To calculate MRR, the results are grouped by qid.
    dataset_groupby_qid = gen_dataset_groupby_qid(dataset, logits_all)

    reciprocal_rank = []
    for _, correct_answer_orders, scores in dataset_groupby_qid:
        if len(correct_answer_orders) == 1:
            # Single gold sentence: find its rank in the sorted scores.
            # NOTE(review): tied scores append one entry per match here —
            # confirm whether that inflation is intended.
            sorted_scores = sorted(scores, reverse=True)
            for j in range(len(sorted_scores)):
                if sorted_scores[j] == scores[correct_answer_orders[0]]:
                    reciprocal_rank.append(1 / (j + 1))
        else:
            # Multiple gold sentences: use the best (lowest) rank among them.
            current_rank = len(scores)
            sorted_scores = sorted(scores, reverse=True)
            for i in range(len(correct_answer_orders)):
                for j in range(len(scores)):
                    if sorted_scores[j] == scores[correct_answer_orders[i]] and j < current_rank:
                        current_rank = j
            reciprocal_rank.append(1 / (current_rank + 1))

    MRR = sum(reciprocal_rank) / len(reciprocal_rank)
    args.logger.info("Mean Reciprocal Rank: {:.4f}".format(MRR))
    return MRR
|
129 |
+
|
130 |
+
|
131 |
+
def main():
    """Fine-tune a sentence-pair classifier for document-based QA (MRR metric)."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    tokenizer_opts(parser)

    parser.add_argument("--soft_targets", action='store_true',
                        help="Train model with logits.")
    parser.add_argument("--soft_alpha", type=float, default=0.5,
                        help="Weight of the soft targets loss.")

    adv_opts(parser)

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Count the number of labels.
    args.labels_num = count_labels_num(args.train_path)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model.
    model = Classifier(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer,opt_level = args.fp16_opt_level)
        # train_model reads amp through args.
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    if args.use_adv:
        # Adversarial training method selected by --adv_type.
        args.adv_method = str2adv[args.adv_type](model)

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt = torch.LongTensor([example[1] for example in trainset])
        seg = torch.LongTensor([example[2] for example in trainset])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep the checkpoint with the best dev-set MRR.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result > best_result:
            best_result = result
            save_model(model, args.output_model_path)

    # Evaluation phase.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))


if __name__ == "__main__":
    main()
|
train/finetune/run_image_classifier.py
ADDED
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example of wrapping TencentPretrain for image classification.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
import torch.nn as nn
|
10 |
+
import torchvision.datasets as dest
|
11 |
+
from torchvision import transforms
|
12 |
+
from torchvision.io import read_image
|
13 |
+
from torchvision.io.image import ImageReadMode
|
14 |
+
|
15 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
16 |
+
sys.path.append(tencentpretrain_dir)
|
17 |
+
|
18 |
+
from tencentpretrain.layers import *
|
19 |
+
from tencentpretrain.encoders import *
|
20 |
+
from tencentpretrain.utils.vocab import Vocab
|
21 |
+
from tencentpretrain.utils.constants import *
|
22 |
+
from tencentpretrain.utils import *
|
23 |
+
from tencentpretrain.utils.optimizers import *
|
24 |
+
from tencentpretrain.utils.config import load_hyperparam
|
25 |
+
from tencentpretrain.utils.misc import ZeroOneNormalize, count_lines
|
26 |
+
from tencentpretrain.utils.seed import set_seed
|
27 |
+
from tencentpretrain.model_saver import save_model
|
28 |
+
from tencentpretrain.opts import finetune_opts
|
29 |
+
from finetune.run_classifier import *
|
30 |
+
|
31 |
+
|
32 |
+
def data_loader(args, path):
    """Stream (image, label, segment) batches from a TSV of label/path rows.

    Images are resized to (args.image_height, args.image_width), normalized
    to [0, 1], and moved to args.device before batching.  Yields
    (src [B, C, H, W], tgt [B], seg [B, patches + 1]); a final partial
    batch is emitted if the row count is not a multiple of args.batch_size.

    Fixes: removed the unused local `dataset`, and renamed the loop-local
    `path` so it no longer shadows the *path* parameter.
    """
    transform = transforms.Compose([
        transforms.Resize((args.image_height, args.image_width)),
        ZeroOneNormalize()
    ])

    columns = {}
    src_batch, tgt_batch, seg_batch = [], [], []
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row: map column names to indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            tgt = int(line[columns["label"]])
            image_path = line[columns["path"]]
            image = read_image(image_path, ImageReadMode.RGB)
            image = image.to(args.device)
            src = transform(image)
            # One segment id per image patch, plus one for the [CLS] position.
            seg = [1] * ((src.size()[1] // args.patch_size) * (src.size()[2] // args.patch_size) + 1)

            src_batch.append(src)
            tgt_batch.append(tgt)
            seg_batch.append(seg)

            if len(src_batch) == args.batch_size:
                yield torch.stack(src_batch, 0), \
                    torch.LongTensor(tgt_batch), \
                    torch.LongTensor(seg_batch)
                src_batch, tgt_batch, seg_batch = [], [], []

    if len(src_batch) > 0:
        yield torch.stack(src_batch, 0), \
            torch.LongTensor(tgt_batch), \
            torch.LongTensor(seg_batch)
|
70 |
+
|
71 |
+
def evaluate(args, dataset_path):
    """Evaluate the image classifier on the TSV at *dataset_path*.

    Logs a confusion matrix plus per-label precision/recall/F1 and
    returns (accuracy, confusion_matrix).
    """
    correct, instances_num = 0, 0
    # confusion[predicted, gold] counts.
    confusion = torch.zeros(args.labels_num, args.labels_num, dtype=torch.long)

    args.model.eval()

    for src_batch, tgt_batch, seg_batch in data_loader(args, dataset_path):
        src_batch = src_batch.to(args.device)
        tgt_batch = tgt_batch.to(args.device)
        seg_batch = seg_batch.to(args.device)
        with torch.no_grad():
            _, logits = args.model(src_batch, tgt_batch, seg_batch)
        pred = torch.argmax(nn.Softmax(dim=1)(logits), dim=1)
        gold = tgt_batch
        for idx in range(pred.size()[0]):
            confusion[pred[idx], gold[idx]] += 1
        correct += torch.sum(pred == gold).item()
        instances_num += len(pred)

    args.logger.info("Confusion matrix:")
    args.logger.info(confusion)
    args.logger.info("Report precision, recall, and f1:")

    # eps guards against division by zero for empty rows/columns.
    eps = 1e-9
    for label in range(confusion.size()[0]):
        precision = confusion[label, label].item() / (confusion[label, :].sum().item() + eps)
        recall = confusion[label, label].item() / (confusion[:, label].sum().item() + eps)
        f1 = 2 * precision * recall / (precision + recall + eps)
        args.logger.info("Label {}: {:.3f}, {:.3f}, {:.3f}".format(label, precision, recall, f1))

    args.logger.info("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct / instances_num, correct, instances_num))
    return correct / instances_num, confusion
|
105 |
+
|
106 |
+
|
107 |
+
def main():
    """Finetune a classifier with the virtual tokenizer, then test-evaluate."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Options shared with the other finetuning scripts.
    finetune_opts(parser)
    tokenizer_opts(parser)
    adv_opts(parser)
    args = parser.parse_args()

    # Merge hyperparameters from the model config file into args.
    args = load_hyperparam(args)
    args.soft_targets, args.soft_alpha = False, 0

    # Dataset statistics (the header line is excluded from the instance count).
    args.labels_num = count_labels_num(args.train_path)
    instances_num = count_lines(args.train_path) - 1

    # Tokenizer and reproducibility.
    args.tokenizer = str2tokenizer["virtual"](args)
    set_seed(args.seed)

    # Classification model with pretrained (or freshly initialized) weights.
    model = Classifier(args)
    load_or_initialize_parameters(args, model)

    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # ----- Training setup -----
    batch_size = args.batch_size
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))
    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    if args.use_adv:
        args.adv_method = str2adv[args.adv_type](model)

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")
    for epoch in range(1, args.epochs_num + 1):
        model.train()
        for i, (src_batch, tgt_batch, seg_batch) in enumerate(data_loader(args, args.train_path)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep only the checkpoint that scores best on the dev set.
        result = evaluate(args, args.dev_path)
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)

    # ----- Final test-set evaluation with the best checkpoint -----
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, args.test_path)


if __name__ == "__main__":
    main()
|
train/finetune/run_ner.py
ADDED
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for NER.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import json
|
9 |
+
import torch
|
10 |
+
import torch.nn as nn
|
11 |
+
import torch.nn.functional as F
|
12 |
+
|
13 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
14 |
+
sys.path.append(tencentpretrain_dir)
|
15 |
+
|
16 |
+
from tencentpretrain.embeddings import *
|
17 |
+
from tencentpretrain.encoders import *
|
18 |
+
from tencentpretrain.utils.config import load_hyperparam
|
19 |
+
from tencentpretrain.utils.optimizers import *
|
20 |
+
from tencentpretrain.utils.constants import *
|
21 |
+
from tencentpretrain.utils.vocab import Vocab
|
22 |
+
from tencentpretrain.utils.seed import set_seed
|
23 |
+
from tencentpretrain.utils.logging import init_logger
|
24 |
+
from tencentpretrain.utils.tokenizers import *
|
25 |
+
from tencentpretrain.model_saver import save_model
|
26 |
+
from tencentpretrain.opts import finetune_opts
|
27 |
+
from finetune.run_classifier import build_optimizer, load_or_initialize_parameters
|
28 |
+
|
29 |
+
|
30 |
+
class NerTagger(nn.Module):
    """Sequence tagger: embedding -> encoder -> per-token label projection.

    With ``args.crf_target`` set, a CRF layer performs decoding and supplies
    the loss; otherwise a padding-masked token-level cross entropy is used.
    """

    def __init__(self, args):
        super(NerTagger, self).__init__()
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            emb_layer = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(emb_layer, embedding_name)
        self.encoder = str2encoder[args.encoder](args)
        self.labels_num = args.labels_num
        self.output_layer = nn.Linear(args.hidden_size, self.labels_num)
        self.crf_target = args.crf_target
        if args.crf_target:
            from torchcrf import CRF
            self.crf = CRF(self.labels_num, batch_first=True)
            self.seq_length = args.seq_length

    def forward(self, src, tgt, seg):
        """
        Args:
            src: [batch_size x seq_length] token ids.
            tgt: [batch_size x seq_length] label ids, or None at inference time.
            seg: [batch_size x seq_length] mask (0 marks padding positions).
        Returns:
            loss: Sequence labeling loss (None when tgt is None).
            pred: Flattened per-token label predictions.
        """
        hidden = self.encoder(self.embedding(src, seg), seg)
        logits = self.output_layer(hidden)

        if self.crf_target:
            mask = seg.type(torch.uint8)
            decoded = self.crf.decode(logits, mask=mask)
            # CRF decoding drops padded positions; pad every sequence back to
            # seq_length with the last label id (the [PAD] label).
            for seq in decoded:
                while len(seq) < self.seq_length:
                    seq.append(self.labels_num - 1)
            pred = torch.tensor(decoded).contiguous().view(-1)
            if tgt is None:
                return None, pred
            loss = -self.crf(F.log_softmax(logits, 2), tgt, mask=mask, reduction='mean')
            return loss, pred

        flat_mask = seg.contiguous().view(-1).float()
        logits = logits.contiguous().view(-1, self.labels_num)
        pred = logits.argmax(dim=-1)
        if tgt is None:
            return None, pred
        tgt = tgt.contiguous().view(-1, 1)
        one_hot = torch.zeros(tgt.size(0), self.labels_num). \
                  to(torch.device(tgt.device)). \
                  scatter_(1, tgt, 1.0)
        # Padding-masked token-level cross entropy, averaged over real tokens.
        token_nll = -torch.sum(nn.LogSoftmax(dim=-1)(logits) * one_hot, 1)
        loss = torch.sum(flat_mask * token_nll) / (torch.sum(flat_mask) + 1e-6)
        return loss, pred
|
91 |
+
|
92 |
+
|
93 |
+
def read_dataset(args, path):
    """Load a TSV sequence-labeling dataset.

    The first line is a header naming the columns; each following line must
    provide a space-separated "label" column and a "text_a" token column.
    Every example is truncated/padded to ``args.seq_length``; padding
    positions get the [PAD] label id (``args.labels_num - 1``) and segment 0.

    Returns a list of ``[src_ids, tgt_label_ids, seg_mask]`` triples.
    """
    dataset, columns = [], {}
    # Fix: the pad id never changes, so look it up once instead of calling
    # the tokenizer on every data line.
    PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row: map column names to their indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            labels = line[columns["label"]]
            tgt = [args.l2i[l] for l in labels.split(" ")]

            text_a = line[columns["text_a"]]
            src = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_a))
            seg = [1] * len(src)

            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                tgt = tgt[: args.seq_length]
                seg = seg[: args.seq_length]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                tgt.append(args.labels_num - 1)
                seg.append(0)
            dataset.append([src, tgt, seg])

    return dataset
|
121 |
+
|
122 |
+
|
123 |
+
def batch_loader(batch_size, src, tgt, seg):
    """Yield ``(src, tgt, seg)`` mini-batches of row-aligned tensors.

    The final batch may be smaller than ``batch_size`` when the number of
    instances is not an exact multiple of it.
    """
    total = src.size(0)
    full_batches = total // batch_size
    for idx in range(full_batches):
        lo, hi = idx * batch_size, (idx + 1) * batch_size
        yield src[lo:hi, :], tgt[lo:hi, :], seg[lo:hi, :]
    tail_start = full_batches * batch_size
    if tail_start < total:
        # Remainder batch with the leftover rows.
        yield src[tail_start:, :], tgt[tail_start:, :], seg[tail_start:, :]
|
135 |
+
|
136 |
+
|
137 |
+
def train(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch):
    """Run one optimization step on a single batch and return its loss tensor."""
    model.zero_grad()

    # Move the batch onto the training device.
    src_batch, tgt_batch, seg_batch = (
        src_batch.to(args.device),
        tgt_batch.to(args.device),
        seg_batch.to(args.device),
    )

    loss, _ = model(src_batch, tgt_batch, seg_batch)
    if torch.cuda.device_count() > 1:
        # DataParallel returns one loss per replica; reduce to a scalar.
        loss = torch.mean(loss)

    if args.fp16:
        # NOTE(review): assumes `amp` is in scope when fp16 is enabled
        # (it is imported inside main) -- confirm before relying on this path.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    optimizer.step()
    scheduler.step()

    return loss
|
158 |
+
|
159 |
+
|
160 |
+
def evaluate(args, dataset):
    """Entity-level evaluation for NER.

    An entity starts at a position whose label id is in ``args.begin_ids``
    (the B-* labels) and extends until the next [PAD], O, or B-* label.
    A predicted entity is correct only if the same span exists in the gold
    annotation and every label inside the span matches.

    Logs precision/recall/F1 and returns the F1 score.
    """
    src = torch.LongTensor([sample[0] for sample in dataset])
    tgt = torch.LongTensor([sample[1] for sample in dataset])
    seg = torch.LongTensor([sample[2] for sample in dataset])

    instances_num = src.size(0)
    batch_size = args.batch_size

    correct, gold_entities_num, pred_entities_num = 0, 0, 0

    args.model.eval()

    for i, (src_batch, tgt_batch, seg_batch) in enumerate(batch_loader(batch_size, src, tgt, seg)):
        src_batch = src_batch.to(args.device)
        tgt_batch = tgt_batch.to(args.device)
        seg_batch = seg_batch.to(args.device)
        # Fix: run inference without building an autograd graph -- the other
        # evaluate() functions in this project already wrap the model call in
        # torch.no_grad(); this one was missing it and wasted memory.
        with torch.no_grad():
            loss, pred = args.model(src_batch, tgt_batch, seg_batch)

        gold = tgt_batch.contiguous().view(-1, 1)

        # Count gold entities: every B-* position starts one.
        for j in range(gold.size()[0]):
            if gold[j].item() in args.begin_ids:
                gold_entities_num += 1

        # Count predicted entities, ignoring positions that are padding in gold.
        for j in range(pred.size()[0]):
            if pred[j].item() in args.begin_ids and gold[j].item() != args.l2i["[PAD]"]:
                pred_entities_num += 1

        pred_entities_pos, gold_entities_pos = set(), set()

        # Collect gold entity spans as (start, end) pairs.
        for j in range(gold.size()[0]):
            if gold[j].item() in args.begin_ids:
                start = j
                for k in range(j + 1, gold.size()[0]):
                    if gold[k].item() == args.l2i["[PAD]"] or gold[k].item() == args.l2i["O"] or gold[k].item() in args.begin_ids:
                        end = k - 1
                        break
                else:
                    end = gold.size()[0] - 1
                gold_entities_pos.add((start, end))

        # Collect predicted entity spans the same way.
        for j in range(pred.size()[0]):
            if pred[j].item() in args.begin_ids and gold[j].item() != args.l2i["[PAD]"]:
                start = j
                for k in range(j + 1, pred.size()[0]):
                    if pred[k].item() == args.l2i["[PAD]"] or pred[k].item() == args.l2i["O"] or pred[k].item() in args.begin_ids:
                        end = k - 1
                        break
                else:
                    end = pred.size()[0] - 1
                pred_entities_pos.add((start, end))

        # A predicted span is correct when every label inside it matches gold.
        for entity in pred_entities_pos:
            if entity not in gold_entities_pos:
                continue
            for j in range(entity[0], entity[1] + 1):
                if gold[j].item() != pred[j].item():
                    break
            else:
                correct += 1

    args.logger.info("Report precision, recall, and f1:")
    eps = 1e-9
    p = correct / (pred_entities_num + eps)
    r = correct / (gold_entities_num + eps)
    f1 = 2 * p * r / (p + r + eps)
    args.logger.info("{:.3f}, {:.3f}, {:.3f}".format(p, r, f1))

    return f1
|
229 |
+
|
230 |
+
|
231 |
+
def main():
    """Finetune an NER tagger and evaluate it on the test set if provided."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    parser.add_argument("--vocab_path", default=None, type=str,
                        help="Path of the vocabulary file.")
    parser.add_argument("--spm_model_path", default=None, type=str,
                        help="Path of the sentence piece model.")
    parser.add_argument("--label2id_path", type=str, required=True,
                        help="Path of the label2id file.")
    parser.add_argument("--crf_target", action="store_true",
                        help="Use CRF loss as the target function or not, default False.")

    args = parser.parse_args()

    # Merge hyperparameters from the model config file into args.
    args = load_hyperparam(args)

    args.logger = init_logger(args)

    set_seed(args.seed)

    # Build the label mapping; a synthetic [PAD] label is appended and every
    # B-* label id is recorded so entity starts can be recognized later.
    args.begin_ids = []
    with open(args.label2id_path, mode="r", encoding="utf-8") as f:
        l2i = json.load(f)
    args.logger.info("Labels: " + str(l2i))
    l2i["[PAD]"] = len(l2i)
    for label in l2i:
        if label.startswith("B"):
            args.begin_ids.append(l2i[label])

    args.l2i = l2i
    args.labels_num = len(l2i)

    args.tokenizer = SpaceTokenizer(args)

    # Sequence labeling model with pretrained (or fresh) weights.
    model = NerTagger(args)
    load_or_initialize_parameters(args, model)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # ----- Training setup -----
    instances = read_dataset(args, args.train_path)
    instances_num = len(instances)
    batch_size = args.batch_size
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    total_loss, f1, best_f1 = 0.0, 0.0, 0.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        # Reshuffle and re-tensorize the training data every epoch.
        random.shuffle(instances)
        src = torch.LongTensor([ins[0] for ins in instances])
        tgt = torch.LongTensor([ins[1] for ins in instances])
        seg = torch.LongTensor([ins[2] for ins in instances])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch) in enumerate(batch_loader(batch_size, src, tgt, seg)):
            loss = train(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep only the checkpoint that scores best on the dev set.
        f1 = evaluate(args, read_dataset(args, args.dev_path))
        if f1 > best_f1:
            best_f1 = f1
            save_model(model, args.output_model_path)

    # ----- Final test-set evaluation with the best checkpoint -----
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))


if __name__ == "__main__":
    main()
|
train/finetune/run_regression.py
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for regression.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
import torch.nn as nn
|
10 |
+
|
11 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
12 |
+
sys.path.append(tencentpretrain_dir)
|
13 |
+
|
14 |
+
from finetune.run_classifier import *
|
15 |
+
from scipy.stats import spearmanr
|
16 |
+
|
17 |
+
|
18 |
+
class Regression(nn.Module):
    """Sentence-level regression head on top of a pretrained encoder."""

    def __init__(self, args):
        super(Regression, self).__init__()
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            emb_layer = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(emb_layer, embedding_name)
        self.encoder = str2encoder[args.encoder](args)
        self.pooling_type = args.pooling
        self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size)
        self.output_layer_2 = nn.Linear(args.hidden_size, 1)

    def forward(self, src, tgt, seg, soft_tgt=None):
        """
        Args:
            src: [batch_size x seq_length] token ids.
            tgt: [batch_size] float targets, or None at inference time.
            seg: [batch_size x seq_length] segment ids (0 marks padding).
        Returns:
            (loss, logits); loss is None when tgt is None.
        """
        hidden = self.encoder(self.embedding(src, seg), seg)
        # Pool token vectors into one sentence vector, then project to a scalar.
        pooled = pooling(hidden, seg, self.pooling_type)
        pooled = torch.tanh(self.output_layer_1(pooled))
        logits = self.output_layer_2(pooled)
        if tgt is None:
            return None, logits
        loss = nn.MSELoss()(logits.view(-1), tgt.view(-1))
        return loss, logits
|
50 |
+
|
51 |
+
|
52 |
+
def read_dataset(args, path):
    """Load a TSV regression dataset.

    The header row names the columns; "label" holds a float target and
    "text_a" (optionally paired with "text_b") the input text. Every example
    is truncated and padded to ``args.seq_length``.

    Returns a list of ``(src_ids, float_target, seg_ids)`` triples.
    """
    dataset, columns = [], {}
    # Fix: the pad id never changes, so look it up once instead of calling
    # the tokenizer on every data line.
    PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row: map column names to their indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            tgt = float(line[columns["label"]])
            if "text_b" not in columns:
                text_a = line[columns["text_a"]]
                src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
                seg = [1] * len(src)
            else:
                # Sentence pair: segment 1 for text_a, segment 2 for text_b.
                text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
                src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
                src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
                src = src_a + src_b
                seg = [1] * len(src_a) + [2] * len(src_b)

            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)
            dataset.append((src, tgt, seg))

    return dataset
|
83 |
+
|
84 |
+
|
85 |
+
def evaluate(args, dataset):
    """Score the regression model: Spearman correlation of preds vs. gold."""
    src = torch.LongTensor([sample[0] for sample in dataset])
    tgt = torch.FloatTensor([sample[1] for sample in dataset])
    seg = torch.LongTensor([sample[2] for sample in dataset])

    gold_list, pred_list = [], []
    batch_size = args.batch_size

    args.model.eval()

    for src_batch, tgt_batch, seg_batch, _ in batch_loader(batch_size, src, tgt, seg):
        src_batch = src_batch.to(args.device)
        tgt_batch = tgt_batch.to(args.device)
        seg_batch = seg_batch.to(args.device)
        with torch.no_grad():
            _, pred = args.model(src_batch, tgt_batch, seg_batch)
        pred_list.extend(pred.tolist())
        gold_list.extend(tgt_batch.tolist())

    spearman_corr, _ = spearmanr(gold_list, pred_list)

    args.logger.info("Spearman corr: {:.4f}".format(spearman_corr))
    return spearman_corr
|
108 |
+
|
109 |
+
|
110 |
+
def main():
    """Finetune a regression model and test-evaluate the best checkpoint."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Options shared with the other finetuning scripts.
    finetune_opts(parser)
    tokenizer_opts(parser)
    adv_opts(parser)
    args = parser.parse_args()

    # Merge hyperparameters from the model config file into args.
    args = load_hyperparam(args)

    # Tokenizer and reproducibility.
    args.tokenizer = str2tokenizer[args.tokenizer](args)
    set_seed(args.seed)

    model = Regression(args)
    load_or_initialize_parameters(args, model)

    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # ----- Training setup -----
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))
    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    if args.use_adv:
        args.adv_method = str2adv[args.adv_type](model)

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")
    for epoch in range(1, args.epochs_num + 1):
        # Reshuffle and re-tensorize the training data every epoch.
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt = torch.FloatTensor([example[1] for example in trainset])
        seg = torch.LongTensor([example[2] for example in trainset])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg, None)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, None)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep only the checkpoint that scores best on the dev set.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result > best_result:
            best_result = result
            save_model(model, args.output_model_path)

    # ----- Final test-set evaluation with the best checkpoint -----
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))


if __name__ == "__main__":
    main()
|
train/finetune/run_simcse.py
ADDED
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for SimCSE.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import math
|
9 |
+
import scipy.stats
|
10 |
+
import torch
|
11 |
+
import torch.nn as nn
|
12 |
+
import numpy as np
|
13 |
+
|
14 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
15 |
+
sys.path.append(tencentpretrain_dir)
|
16 |
+
|
17 |
+
from tencentpretrain.embeddings import *
|
18 |
+
from tencentpretrain.encoders import *
|
19 |
+
from tencentpretrain.targets import *
|
20 |
+
from tencentpretrain.utils.vocab import Vocab
|
21 |
+
from tencentpretrain.utils.constants import *
|
22 |
+
from tencentpretrain.utils import *
|
23 |
+
from tencentpretrain.utils.optimizers import *
|
24 |
+
from tencentpretrain.utils.config import load_hyperparam
|
25 |
+
from tencentpretrain.utils.seed import set_seed
|
26 |
+
from tencentpretrain.utils.logging import init_logger
|
27 |
+
from tencentpretrain.model_saver import save_model
|
28 |
+
from tencentpretrain.opts import finetune_opts, tokenizer_opts
|
29 |
+
from finetune.run_classifier import count_labels_num, build_optimizer, load_or_initialize_parameters
|
30 |
+
from finetune.run_classifier_siamese import batch_loader
|
31 |
+
|
32 |
+
|
33 |
+
class SimCSE(nn.Module):
    """Twin-view sentence encoder for SimCSE: encodes both views of a pair
    with the same embedding/encoder stack and returns their pooled vectors."""

    def __init__(self, args):
        super(SimCSE, self).__init__()
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            emb_layer = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(emb_layer, embedding_name)
        self.encoder = str2encoder[args.encoder](args)

        self.pooling_type = args.pooling

    def forward(self, src, seg):
        """
        Args:
            src: pair of [batch_size x seq_length] token-id tensors.
            seg: pair of [batch_size x seq_length] segment tensors (0 = padding).
        Returns:
            Pooled feature vectors, one tensor per view.
        """
        emb_a = self.embedding(src[0], seg[0])
        emb_b = self.embedding(src[1], seg[1])
        out_a = self.encoder(emb_a, seg[0])
        out_b = self.encoder(emb_b, seg[1])
        feats_a = self.pooling(out_a, seg[0], self.pooling_type)
        feats_b = self.pooling(out_b, seg[1], self.pooling_type)
        return feats_a, feats_b

    def pooling(self, memory_bank, seg, pooling_type):
        """Pool token vectors into one sentence vector; padding is masked out."""
        mask = torch.unsqueeze(seg, dim=-1).type(torch.float)
        memory_bank = memory_bank * mask
        if pooling_type == "mean":
            summed = torch.sum(memory_bank, dim=1)
            return torch.div(summed, torch.sum(mask, dim=1))
        if pooling_type == "last":
            last_idx = torch.squeeze(torch.sum(mask, dim=1).type(torch.int64) - 1)
            return memory_bank[torch.arange(memory_bank.shape[0]), last_idx, :]
        if pooling_type == "max":
            # Push padded positions far below any real value before the max.
            return torch.max(memory_bank + (mask - 1) * sys.maxsize, dim=1)[0]
        # Default: the first ([CLS]) position.
        return memory_bank[:, 0, :]
|
76 |
+
|
77 |
+
|
78 |
+
def read_dataset(args, path):
    """Load a TSV file of sentence pairs (or single sentences) for SimCSE.

    Each example is ((src_a, src_b), label, (seg_a, seg_b)). When the file
    has no "label" column, the same sentence is used for both views with
    label -1 (unsupervised mode).
    """
    def encode(text):
        # Tokenize, add special tokens, then truncate/pad to args.seq_length.
        ids = args.tokenizer.convert_tokens_to_ids(
            [CLS_TOKEN] + args.tokenizer.tokenize(text) + [SEP_TOKEN])
        seg = [1] * len(ids)
        pad_id = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
        if len(ids) >= args.seq_length:
            ids = ids[:args.seq_length]
            seg = seg[:args.seq_length]
        while len(ids) < args.seq_length:
            ids.append(pad_id)
            seg.append(0)
        return ids, seg

    dataset, columns = [], {}
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row maps column names to field indices.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            fields = line.rstrip("\r\n").split("\t")

            text_a = fields[columns["text_a"]]
            text_b = fields[columns["text_b"]] if "text_b" in columns else text_a

            src_a, seg_a = encode(text_a)
            src_b, seg_b = encode(text_b)

            if "label" in columns:
                tgt = float(fields[columns["label"]])
                dataset.append(((src_a, src_b), tgt, (seg_a, seg_b)))
            else:
                # Unsupervised SimCSE: the same sentence serves as both views.
                dataset.append(((src_a, src_a), -1, (seg_a, seg_a)))
    return dataset
|
119 |
+
|
120 |
+
|
121 |
+
def evaluate(args, dataset):
    """Score dataset with the current model and report Spearman correlation
    between gold labels and pairwise cosine similarities."""
    src_a = torch.LongTensor([sample[0][0] for sample in dataset])
    src_b = torch.LongTensor([sample[0][1] for sample in dataset])
    tgt = torch.FloatTensor([sample[1] for sample in dataset])
    seg_a = torch.LongTensor([sample[2][0] for sample in dataset])
    seg_b = torch.LongTensor([sample[2][1] for sample in dataset])

    all_similarities = []
    batch_size = args.batch_size
    args.model.eval()

    for src_batch, tgt_batch, seg_batch in batch_loader(batch_size, (src_a, src_b), tgt, (seg_a, seg_b)):
        src_a_batch = src_batch[0].to(args.device)
        src_b_batch = src_batch[1].to(args.device)
        seg_a_batch = seg_batch[0].to(args.device)
        seg_b_batch = seg_batch[1].to(args.device)

        with torch.no_grad():
            features_0, features_1 = args.model((src_a_batch, src_b_batch), (seg_a_batch, seg_b_batch))
            sim = similarity(features_0, features_1, 1)

        # Only diagonal entries pair each sentence with its own counterpart.
        all_similarities.extend(sim[j][j].item() for j in range(sim.size(0)))

    corrcoef = scipy.stats.spearmanr(tgt, all_similarities).correlation
    args.logger.info("Spearman's correlation: {:.4f}".format(corrcoef))
    return corrcoef
|
153 |
+
|
154 |
+
|
155 |
+
def similarity(x, y, temperature):
    """Temperature-scaled cosine-similarity matrix between rows of x and y."""
    x_unit = x / x.norm(dim=-1, keepdim=True)
    y_unit = y / y.norm(dim=-1, keepdim=True)
    return torch.matmul(x_unit, y_unit.transpose(-2, -1)) / temperature
|
159 |
+
|
160 |
+
|
161 |
+
def main():
    """Fine-tune a SimCSE sentence-embedding model and keep the best checkpoint."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    tokenizer_opts(parser)

    # Contrastive-loss temperature for the in-batch softmax.
    parser.add_argument("--temperature", type=float, default=0.05)
    parser.add_argument("--eval_steps", type=int, default=200, help="Evaluate frequency.")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model.
    model = SimCSE(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    # Spearman correlation: higher is better, so best_result starts at 0.
    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")
    for epoch in range(1, args.epochs_num + 1):
        random.shuffle(trainset)
        src_a = torch.LongTensor([example[0][0] for example in trainset])
        src_b = torch.LongTensor([example[0][1] for example in trainset])
        tgt = torch.FloatTensor([example[1] for example in trainset])
        seg_a = torch.LongTensor([example[2][0] for example in trainset])
        seg_b = torch.LongTensor([example[2][1] for example in trainset])

        model.train()
        for i, (src_batch, tgt_batch, seg_batch) in enumerate(batch_loader(batch_size, (src_a, src_b), tgt, (seg_a, seg_b))):
            model.zero_grad()

            src_a_batch, src_b_batch = src_batch
            seg_a_batch, seg_b_batch = seg_batch

            src_a_batch = src_a_batch.to(args.device)
            src_b_batch = src_b_batch.to(args.device)

            seg_a_batch = seg_a_batch.to(args.device)
            seg_b_batch = seg_b_batch.to(args.device)

            features_0, features_1 = model((src_a_batch, src_b_batch), (seg_a_batch, seg_b_batch))

            similarity_matrix = similarity(features_0, features_1, args.temperature)
            # In-batch contrastive objective: the positive for each example
            # lies on the diagonal, so the class target is the row index.
            tgt_batch = torch.arange(similarity_matrix.size(0), device=similarity_matrix.device, dtype=torch.long)
            loss = nn.CrossEntropyLoss()(similarity_matrix, tgt_batch)

            if args.fp16:
                with args.amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            optimizer.step()
            scheduler.step()

            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}"
                                 .format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

            # Evaluate periodically and at the end of each epoch; save on improvement.
            if (i + 1) % args.eval_steps == 0 or (i + 1) == math.ceil(instances_num / batch_size):
                result = evaluate(args, read_dataset(args, args.dev_path))
                args.logger.info("Epoch id: {}, Training steps: {}, Evaluate result: {}, Best result: {}"
                                 .format(epoch, i + 1, result, best_result))
                if result > best_result:
                    best_result = result
                    save_model(model, args.output_model_path)
                    args.logger.info("It is the best model until now. Save it to {}".format(args.output_model_path))
|
271 |
+
|
272 |
+
|
273 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
train/finetune/run_speech2text.py
ADDED
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for speech-to-text fine-tuning.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import editdistance
|
9 |
+
import torch
|
10 |
+
import torchaudio
|
11 |
+
import torchaudio.compliance.kaldi as ta_kaldi
|
12 |
+
|
13 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
14 |
+
sys.path.append(tencentpretrain_dir)
|
15 |
+
|
16 |
+
from tencentpretrain.model_saver import save_model
|
17 |
+
from tencentpretrain.decoders import *
|
18 |
+
from tencentpretrain.targets import *
|
19 |
+
from tencentpretrain.utils import utterance_cmvn
|
20 |
+
from finetune.run_classifier import *
|
21 |
+
|
22 |
+
|
23 |
+
class Speech2text(torch.nn.Module):
    """Encoder-decoder model for speech-to-text fine-tuning.

    Audio features go through the source embedding + encoder; target tokens
    go through a separate embedding + decoder, followed by an LM output head.
    """

    def __init__(self, args):
        super(Speech2text, self).__init__()
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            self.embedding.update(
                str2embedding[embedding_name](args, len(args.tokenizer.vocab)), embedding_name)
        self.encoder = str2encoder[args.encoder](args)
        self.tgt_embedding = Embedding(args)
        for embedding_name in args.tgt_embedding:
            self.tgt_embedding.update(
                str2embedding[embedding_name](args, len(args.tokenizer.vocab)), embedding_name)
        self.decoder = str2decoder[args.decoder](args)
        self.target = Target()
        for target_name in args.target:
            self.target.update(
                str2target[target_name](args, len(args.tokenizer.vocab)), target_name)
        if args.tie_weights:
            # Share the output projection with the target-side word embedding.
            self.target.lm.output_layer.weight = self.tgt_embedding.word.embedding.weight

    def encode(self, src, seg):
        """Encode audio features; returns (memory_bank, source_embedding)."""
        emb = self.embedding(src, seg)
        return self.encoder(emb, seg), emb

    def decode(self, emb, memory_bank, tgt, tgt_seg):
        """Return vocabulary logits for the partial target sequence."""
        tgt_in = tgt[0]
        decoder_emb = self.tgt_embedding(tgt_in, tgt_seg)
        # NOTE(review): the first feature channel's magnitude is passed as the
        # decoder's source mask — presumably non-zero only on real frames; confirm.
        hidden = self.decoder(memory_bank, decoder_emb, [emb.abs()[:, :, 0]])
        return self.target.lm.output_layer(hidden)

    def forward(self, src, tgt, seg, tgt_seg, memory_bank=None, only_use_encoder=False):
        """Training returns (loss, None); generation paths return logits."""
        if only_use_encoder:
            return self.encode(src, seg)
        if memory_bank is not None:
            # During generation `src` already carries the source embedding.
            return self.decode(src, memory_bank, tgt, tgt_seg)
        tgt_in, tgt_out, _ = tgt
        memory_bank, emb = self.encode(src, seg)

        if tgt_out is None:
            return None, self.decode(emb, memory_bank, tgt, None)
        decoder_emb = self.tgt_embedding(tgt_in, tgt_seg)
        hidden = self.decoder(memory_bank, decoder_emb, (seg,))
        loss = self.target(hidden, tgt_out, None)[0]
        return loss, None
|
72 |
+
|
73 |
+
|
74 |
+
def read_dataset(args, path):
    """Load a TSV of (text, wav_path) rows into (audio, tgt_in, tgt_out, seg, tgt_seg).

    Utterances with more than args.max_audio_frames fbank frames are skipped.
    """
    dataset, columns = [], {}
    feat_dim = args.audio_feature_size
    padding_vector = torch.FloatTensor(feat_dim * [0.0] if feat_dim > 1 else 0.0).unsqueeze(0)

    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row: column name -> field index.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            fields = line.rstrip("\r\n").split("\t")
            text, wav_path = fields[columns["text"]], fields[columns["wav_path"]]

            # Target token ids: [CLS] tokens [SEP], truncated then padded.
            tgt = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN]) + \
                  args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text)) + \
                  args.tokenizer.convert_tokens_to_ids([SEP_TOKEN])
            if len(tgt) > args.seq_length:
                tgt = tgt[: args.seq_length]
            pad_ids = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])
            pad_num = args.seq_length - len(tgt)
            tgt = tgt + pad_ids * pad_num

            waveform, sample_rate = torchaudio.load(wav_path)
            waveform = waveform * (2 ** 15)  # Kaldi compliance: 16-bit signed integers
            feature = ta_kaldi.fbank(waveform, num_mel_bins=feat_dim, sample_frequency=sample_rate)
            if "ceptral_normalize" in args.audio_preprocess:
                feature = utterance_cmvn(feature)
            difference = args.max_audio_frames - feature.size(0)

            if difference < 0:
                # Clip exceeds the supported frame budget; drop it.
                continue
            src_audio = torch.cat([feature] + [padding_vector] * difference)
            # Segment length is measured after the conv front-end's downsampling.
            valid_len = int(feature.size(0) / args.conv_layers_num / 2)
            total_len = int(args.max_audio_frames / args.conv_layers_num / 2)
            seg_audio = [1] * valid_len + [0] * (total_len - valid_len)

            # Teacher forcing: input is shifted right relative to the output.
            tgt_in = tgt[:-1]
            tgt_out = tgt[1:]
            tgt_seg = [1] * (len(tgt) - 1 - pad_num) + [0] * pad_num

            dataset.append((src_audio, tgt_in, tgt_out, seg_audio, tgt_seg))

    return dataset
|
117 |
+
|
118 |
+
|
119 |
+
def batch_loader(batch_size, src, tgt_in, tgt_out, seg, tgt_seg):
    """Yield consecutive mini-batches of all five tensors; the final batch may be smaller."""
    instances_num = src.size()[0]
    tensors = (src, tgt_in, tgt_out, seg, tgt_seg)
    full_batches = instances_num // batch_size
    for i in range(full_batches):
        lo, hi = i * batch_size, (i + 1) * batch_size
        yield tuple(t[lo:hi, :] for t in tensors)
    # Emit the remainder, if the instance count is not divisible by batch_size.
    if instances_num > full_batches * batch_size:
        yield tuple(t[full_batches * batch_size :, :] for t in tensors)
|
136 |
+
|
137 |
+
|
138 |
+
def train_model(args, model, optimizer, scheduler, src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch):
    """Run one forward/backward/update step and return the loss tensor."""
    model.zero_grad()

    batches = [src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch]
    src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch = \
        [b.to(args.device) for b in batches]

    loss, _ = model(src_batch, (tgt_in_batch, tgt_out_batch, src_batch), seg_batch, tgt_seg_batch)

    # DataParallel returns one loss per GPU; reduce to a scalar.
    if torch.cuda.device_count() > 1:
        loss = torch.mean(loss)

    if args.fp16:
        with args.amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    optimizer.step()
    scheduler.step()

    return loss
|
162 |
+
|
163 |
+
|
164 |
+
def evaluate(args, dataset):
    """Greedy-decode every example in dataset and report word error rate (WER).

    Returns w_errs / w_total (lower is better).
    """
    src = torch.stack([example[0] for example in dataset], dim=0)
    tgt_in = torch.LongTensor([example[1] for example in dataset])
    tgt_out = torch.LongTensor([example[2] for example in dataset])
    seg = torch.LongTensor([example[3] for example in dataset])
    tgt_seg = torch.LongTensor([example[4] for example in dataset])

    generated_sentences = []
    args.model.eval()

    for i, (src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch) in enumerate(batch_loader(args.batch_size, src, tgt_in, tgt_out, seg, tgt_seg)):

        src_batch = src_batch.to(args.device)
        # Start decoding from a single [CLS] token per example.
        tgt_in_batch = torch.zeros(tgt_in_batch.size()[0], 1, dtype=torch.long, device=args.device)
        tgt_seg_batch = torch.ones(tgt_in_batch.size()[0], 1, dtype=torch.long, device=args.device)
        for j in range(tgt_in_batch.size()[0]):
            tgt_in_batch[j][0] = args.tokenizer.vocab.get(CLS_TOKEN)

        seg_batch = seg_batch.to(args.device)

        with torch.no_grad():
            # Encode the audio once; reuse the memory bank for every decode step.
            memory_bank, emb = args.model(src_batch, None, seg_batch, tgt_seg_batch, only_use_encoder=True)

        # Greedy autoregressive generation, one token per iteration.
        for _ in range(args.tgt_seq_length):
            tgt_out_batch = tgt_in_batch
            with torch.no_grad():
                outputs = args.model(emb, (tgt_in_batch, tgt_out_batch, src_batch), None, tgt_seg_batch, memory_bank=memory_bank)

            # Append the arg-max token and grow the target mask accordingly.
            next_token_logits = outputs[:, -1]
            next_tokens = torch.argmax(next_token_logits, dim=1).unsqueeze(1)
            tgt_in_batch = torch.cat([tgt_in_batch, next_tokens], dim=1)
            tgt_seg_batch = torch.ones(tgt_in_batch.size()[0], tgt_in_batch.size()[1], dtype=torch.long, device=args.device)
        for j in range(len(outputs)):
            # Skip the leading [CLS] when rendering the hypothesis string.
            sentence = "".join([args.tokenizer.inv_vocab[token_id.item()] for token_id in tgt_in_batch[j][1:]])
            generated_sentences.append(sentence)

    w_errs = 0
    w_total = 0

    for i, example in enumerate(dataset):
        # example[2] is tgt_out; [:-2] trims the last two positions —
        # presumably to drop trailing [SEP]/padding, TODO confirm.
        tgt = example[2]
        tgt_token = "".join([args.tokenizer.inv_vocab[token_id] for token_id in tgt[:-2]])
        # Cut the hypothesis at the first [SEP].
        generated_sentences[i] = generated_sentences[i].split(SEP_TOKEN)[0]

        # "▁" is the sentencepiece word boundary: compare word sequences.
        pred = generated_sentences[i].split("▁")
        gold = tgt_token.split(SEP_TOKEN)[0].split("▁")
        w_errs += editdistance.eval(pred, gold)
        w_total += len(gold)

    args.logger.info("WER. (Word_Errors/Total): {:.4f} ({}/{}) ".format(w_errs / w_total, w_errs, w_total))
    return w_errs / w_total
|
216 |
+
|
217 |
+
|
218 |
+
def main():
    """Fine-tune a speech-to-text model, keeping the checkpoint with the lowest WER."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    tokenizer_opts(parser)

    parser.add_argument("--tgt_seq_length", type=int, default=50,
                        help="Output sequence length.")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model.
    model = Speech2text(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    # WER: lower is better, so best_result starts high.
    total_loss, result, best_result = 0.0, 0.0, 100.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        random.shuffle(trainset)
        src = torch.stack([example[0] for example in trainset], dim=0)
        tgt_in = torch.LongTensor([example[1] for example in trainset])
        tgt_out = torch.LongTensor([example[2] for example in trainset])
        seg = torch.LongTensor([example[3] for example in trainset])
        tgt_seg = torch.LongTensor([example[4] for example in trainset])

        model.train()
        for i, (src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch) in enumerate(batch_loader(batch_size, src, tgt_in, tgt_out, seg, tgt_seg)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Evaluate once per epoch; save when WER improves.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result < best_result:
            best_result = result
            save_model(model, args.output_model_path)

    # Evaluation phase.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        # Reload the best checkpoint (unwrap DataParallel if needed).
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))
|
308 |
+
|
309 |
+
|
310 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
train/finetune/run_text2text.py
ADDED
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for text-to-text fine-tuning.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
|
10 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
11 |
+
sys.path.append(tencentpretrain_dir)
|
12 |
+
|
13 |
+
from tencentpretrain.model_saver import save_model
|
14 |
+
from tencentpretrain.decoders import *
|
15 |
+
from tencentpretrain.targets import *
|
16 |
+
from finetune.run_classifier import *
|
17 |
+
|
18 |
+
|
19 |
+
class Text2text(torch.nn.Module):
    """Encoder-decoder model for text-to-text (seq2seq) fine-tuning.

    Source tokens feed embedding + encoder; target tokens feed a separate
    embedding + decoder topped by an LM output head.
    """

    def __init__(self, args):
        super(Text2text, self).__init__()
        self.embedding = Embedding(args)
        for embedding_name in args.embedding:
            tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(tmp_emb, embedding_name)
        self.encoder = str2encoder[args.encoder](args)
        self.tgt_embedding = Embedding(args)
        for embedding_name in args.tgt_embedding:
            tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.tgt_embedding.update(tmp_emb, embedding_name)
        self.decoder = str2decoder[args.decoder](args)
        self.target = Target()
        self.target.update(LmTarget(args, len(args.tokenizer.vocab)), "lm")
        if args.tie_weights:
            # Tie the LM output projection to the source word embedding.
            self.target.lm.output_layer.weight = self.embedding.word.embedding.weight
        if args.share_embedding:
            # Source and target sides share a single word-embedding matrix.
            self.tgt_embedding.word.embedding.weight = self.embedding.word.embedding.weight

    def encode(self, src, seg):
        """Encode source tokens into a memory bank."""
        emb = self.embedding(src, seg)
        memory_bank = self.encoder(emb, seg)
        return memory_bank

    def decode(self, src, memory_bank, tgt, tgt_seg):
        """Return vocabulary logits for the (partial) target sequence."""
        tgt_in, tgt_out, _ = tgt
        decoder_emb = self.tgt_embedding(tgt_in, tgt_seg)
        hidden = self.decoder(memory_bank, decoder_emb, (src,))
        output = self.target.lm.output_layer(hidden)
        return output

    def forward(self, src, tgt, seg, tgt_seg, memory_bank=None, only_use_encoder=False):
        """Training returns (loss, None); generation paths return logits.

        Args:
            src: [batch_size x seq_length] source token ids
            tgt: (tgt_in, tgt_out, src) triple; tgt_out is None at inference
            seg / tgt_seg: segment masks for source / target
        """
        if only_use_encoder:
            return self.encode(src, seg)
        if memory_bank is not None:
            return self.decode(src, memory_bank, tgt, tgt_seg)
        tgt_in, tgt_out, _ = tgt
        memory_bank = self.encode(src, seg)
        if tgt_out is None:
            # Bug fix: decode() takes four arguments; the original call
            # omitted tgt_seg and raised TypeError on this path.
            output = self.decode(src, memory_bank, tgt, tgt_seg)
            return None, output
        else:
            decoder_emb = self.tgt_embedding(tgt_in, tgt_seg)
            hidden = self.decoder(memory_bank, decoder_emb, (seg,))
            loss = self.target(hidden, tgt_out, None)[0]
            return loss, None
|
66 |
+
|
67 |
+
|
68 |
+
def read_dataset(args, path):
    """Load a TSV of (text_a[, text_b], label) rows for text-to-text training.

    Each example is (src, tgt_in, tgt_out, seg, tgt_seg); tgt_out is tgt_in
    shifted left by one (next-token targets), padded with PAD_ID.
    """
    dataset, columns = [], {}
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            if line_id == 0:
                # Header row: column name -> field index.
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")

            if "text_b" in columns:
                text = line[columns["text_a"]] + SEP_TOKEN + line[columns["text_b"]]
                label = line[columns["label"]]
            else:
                text, label = line[columns["text_a"]], line[columns["label"]]

            src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text) + [SEP_TOKEN])
            tgt_in = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(label) + [SEP_TOKEN])
            PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
            seg = [1] * len(src)
            tgt_seg = [1] * len(tgt_in)

            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]
            if len(tgt_in) > args.tgt_seq_length:
                tgt_in = tgt_in[: args.tgt_seq_length]
                tgt_seg = tgt_seg[: args.tgt_seq_length]
            tgt_out = tgt_in[1:] + [PAD_ID]

            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)
            while len(tgt_in) < args.tgt_seq_length:
                tgt_in.append(PAD_ID)
                tgt_out.append(PAD_ID)
                # Bug fix: segment-mask padding must be 0, not the pad token
                # id (the original appended PAD_ID, which is only correct when
                # PAD_ID happens to be 0). Matches the sibling scripts.
                tgt_seg.append(0)

            dataset.append((src, tgt_in, tgt_out, seg, tgt_seg))

    return dataset
|
109 |
+
|
110 |
+
|
111 |
+
def batch_loader(batch_size, src, tgt_in, tgt_out, seg, tgt_seg):
    """Yield (src, tgt_in, tgt_out, seg, tgt_seg) slices of batch_size rows;
    a final partial batch is emitted when the total is not divisible."""
    total = src.size()[0]
    start = 0
    while start + batch_size <= total:
        stop = start + batch_size
        yield (src[start:stop, :], tgt_in[start:stop, :], tgt_out[start:stop, :],
               seg[start:stop, :], tgt_seg[start:stop, :])
        start = stop
    if start < total:
        # Remainder batch with the leftover rows.
        yield (src[start:, :], tgt_in[start:, :], tgt_out[start:, :],
               seg[start:, :], tgt_seg[start:, :])
|
128 |
+
|
129 |
+
|
130 |
+
def train_model(args, model, optimizer, scheduler, src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch):
    """Perform a single forward/backward/update step; returns the loss tensor."""
    model.zero_grad()

    device = args.device
    src_batch = src_batch.to(device)
    tgt_in_batch = tgt_in_batch.to(device)
    tgt_out_batch = tgt_out_batch.to(device)
    seg_batch = seg_batch.to(device)
    tgt_seg_batch = tgt_seg_batch.to(device)

    loss, _ = model(src_batch, (tgt_in_batch, tgt_out_batch, src_batch), seg_batch, tgt_seg_batch)

    if torch.cuda.device_count() > 1:
        # Average the per-GPU losses produced by DataParallel.
        loss = torch.mean(loss)

    if args.fp16:
        with args.amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    optimizer.step()
    scheduler.step()

    return loss
|
154 |
+
|
155 |
+
|
156 |
+
def evaluate(args, dataset):
    """Greedy-decode ``dataset`` and report exact-match label accuracy.

    Each example in ``dataset`` is a 5-tuple of token-id lists:
    (src, tgt_in, tgt_out, seg, tgt_seg).  The target text up to the first
    SEP token is treated as the gold label; a prediction counts as correct
    when the decoded text before SEP matches it exactly.

    Returns:
        float: accuracy in [0, 1] (also logged via ``args.logger``).
    """
    src = torch.LongTensor([example[0] for example in dataset])
    tgt_in = torch.LongTensor([example[1] for example in dataset])
    tgt_out = torch.LongTensor([example[2] for example in dataset])
    seg = torch.LongTensor([example[3] for example in dataset])
    tgt_seg = torch.LongTensor([example[4] for example in dataset])

    generated_sentences = []
    args.model.eval()

    for i, (src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch) in enumerate(batch_loader(args.batch_size, src, tgt_in, tgt_out, seg, tgt_seg)):

        src_batch = src_batch.to(args.device)
        # Start decoding from a single CLS token per instance.
        tgt_in_batch = torch.zeros(tgt_in_batch.size()[0], 1, dtype=torch.long, device=args.device)
        tgt_seg_batch = torch.ones(tgt_in_batch.size()[0], 1, dtype=torch.long, device=args.device)
        for j in range(tgt_in_batch.size()[0]):
            tgt_in_batch[j][-1] = args.tokenizer.vocab.get(CLS_TOKEN)

        seg_batch = seg_batch.to(args.device)

        # Encode the source once; reuse the memory bank for every decode step.
        with torch.no_grad():
            memory_bank = args.model(src_batch, None, seg_batch, tgt_seg_batch, only_use_encoder=True)

        # Greedy decoding: append the argmax token for tgt_seq_length steps.
        for _ in range(args.tgt_seq_length):
            tgt_out_batch = tgt_in_batch
            with torch.no_grad():
                outputs = args.model(src_batch, (tgt_in_batch, tgt_out_batch, src_batch), None, tgt_seg_batch, memory_bank=memory_bank)

            next_token_logits = outputs[:, -1]
            next_tokens = torch.argmax(next_token_logits, dim=1).unsqueeze(1)
            tgt_in_batch = torch.cat([tgt_in_batch, next_tokens], dim=1)
            tgt_seg_batch = torch.ones(tgt_in_batch.size()[0], tgt_in_batch.size()[1], dtype=torch.long, device=args.device)
        # Detokenize everything after the leading CLS token.
        for j in range(len(outputs)):
            sentence = " ".join([args.tokenizer.inv_vocab[token_id.item()] for token_id in tgt_in_batch[j][1:]])
            generated_sentences.append(sentence)

    # Build the label -> id map from the gold targets (text before SEP).
    labels = {}
    labels_num = 0
    for example in dataset:
        label = "".join([args.tokenizer.inv_vocab[token_id] for token_id in example[2][:-2]]).split(SEP_TOKEN)[0]
        # Membership test, not truthiness: label id 0 is falsy, and the old
        # `not labels.get(label, None)` re-registered the first label on
        # every repeat, inflating labels_num and corrupting the id map.
        if label not in labels:
            labels[label] = labels_num
            labels_num += 1
    confusion_matrix = torch.zeros(labels_num, labels_num, dtype=torch.long)
    correct = 0

    for i, example in enumerate(dataset):
        tgt = example[2]
        tgt_token = " ".join([args.tokenizer.inv_vocab[token_id] for token_id in tgt[:-2]])
        generated_sentences[i] = generated_sentences[i].split(SEP_TOKEN)[0]

        # Compare label strings with inter-token spaces removed.
        pred = "".join(generated_sentences[i].split(" "))
        gold = "".join(tgt_token.split(SEP_TOKEN)[0].split(" "))

        # Only predictions that form a known label enter the confusion matrix;
        # free-form decodes that match nothing are counted only as errors.
        if pred in labels.keys():
            confusion_matrix[labels[pred], labels[gold]] += 1

        if pred == gold:
            correct += 1

    args.logger.info("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct / len(dataset), correct, len(dataset)))
    return correct / len(dataset)
|
219 |
+
|
220 |
+
|
221 |
+
def main():
    """Fine-tune a Text2text model and evaluate it on dev (and optionally test) sets."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    finetune_opts(parser)

    tokenizer_opts(parser)

    parser.add_argument("--tgt_seq_length", type=int, default=32,
                        help="Output sequence length.")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model.
    model = Text2text(args)

    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)

    # Get logger.
    args.logger = init_logger(args)

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)

    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size

    # Total optimizer steps over all epochs; used by the LR scheduler.
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))

    optimizer, scheduler = build_optimizer(args, model)

    # apex amp must wrap the model/optimizer AFTER the optimizer is built
    # and BEFORE the DataParallel wrap below.
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model

    total_loss, result, best_result = 0.0, 0.0, 0.0

    args.logger.info("Start training.")

    for epoch in range(1, args.epochs_num + 1):
        # Reshuffle the training set each epoch, then re-tensorize it.
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt_in = torch.LongTensor([example[1] for example in trainset])
        tgt_out = torch.LongTensor([example[2] for example in trainset])
        seg = torch.LongTensor([example[3] for example in trainset])
        tgt_seg = torch.LongTensor([example[4] for example in trainset])

        model.train()
        for i, (src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch) in enumerate(batch_loader(batch_size, src, tgt_in, tgt_out, seg, tgt_seg)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_in_batch, tgt_out_batch, seg_batch, tgt_seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0

        # Keep only the checkpoint with the best dev accuracy.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result > best_result:
            best_result = result
            save_model(model, args.output_model_path)

    # Evaluation phase.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        # Reload the best checkpoint; unwrap DataParallel's .module if needed.
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))


if __name__ == "__main__":
    main()
|
train/inference/run_c3_infer.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This script provides an example to wrap TencentPretrain for C3 (a multiple choice dataset) inference.
|
3 |
+
"""
|
4 |
+
import sys
|
5 |
+
import os
|
6 |
+
import argparse
|
7 |
+
import torch
|
8 |
+
import torch.nn as nn
|
9 |
+
|
10 |
+
tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
11 |
+
sys.path.append(tencentpretrain_dir)
|
12 |
+
|
13 |
+
from tencentpretrain.utils.constants import *
|
14 |
+
from tencentpretrain.utils import *
|
15 |
+
from tencentpretrain.utils.config import load_hyperparam
|
16 |
+
from tencentpretrain.model_loader import load_model
|
17 |
+
from tencentpretrain.opts import infer_opts, tokenizer_opts
|
18 |
+
from finetune.run_classifier import batch_loader
|
19 |
+
from finetune.run_c3 import MultipleChoice, read_dataset
|
20 |
+
|
21 |
+
|
22 |
+
def main():
    """Run C3 multiple-choice inference and write one JSON prediction per line."""
    # `json` is used below (json.load / json.dumps) but is not imported at
    # module level; bind it locally so the script does not depend on a
    # wildcard import happening to re-export it.
    import json

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    infer_opts(parser)

    parser.add_argument("--max_choices_num", default=4, type=int,
                        help="The maximum number of candidate answers; instances with fewer will be padded.")

    tokenizer_opts(parser)

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model and load parameters.
    model = MultipleChoice(args)
    model = load_model(model, args.load_model_path)

    # For simplicity, we use DataParallel wrapper to use multiple GPUs.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    if torch.cuda.device_count() > 1:
        print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)

    dataset = read_dataset(args, args.test_path)

    src = torch.LongTensor([example[0] for example in dataset])
    tgt = torch.LongTensor([example[1] for example in dataset])
    seg = torch.LongTensor([example[2] for example in dataset])

    batch_size = args.batch_size
    instances_num = src.size()[0]

    print("The number of prediction instances: ", instances_num)

    model.eval()

    # Re-read the raw test file to recover each question's original id, in
    # the same order read_dataset produced the instances.
    with open(args.test_path) as f:
        data = json.load(f)

    question_ids = []
    for i in range(len(data)):
        questions = data[i][1]
        for question in questions:
            question_ids.append(question["id"])

    index = 0
    with open(args.prediction_path, "w") as f:
        for i, (src_batch, _, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):

            src_batch = src_batch.to(device)
            seg_batch = seg_batch.to(device)

            with torch.no_grad():
                _, logits = model(src_batch, None, seg_batch)

            # Predicted choice index for every question in the batch.
            pred = (torch.argmax(logits, dim=1)).cpu().numpy().tolist()
            for j in range(len(pred)):
                output = {}
                output["id"] = question_ids[index]
                index += 1
                output["label"] = int(pred[j])
                f.write(json.dumps(output))
                f.write("\n")


if __name__ == "__main__":
    main()
|