HUANGYIFEI commited on
Commit
e3d777b
1 Parent(s): b500f26

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .idea/.gitignore +8 -0
  2. .idea/GraphMAE.iml +10 -0
  3. .idea/deployment.xml +14 -0
  4. .idea/inspectionProfiles/Project_Default.xml +76 -0
  5. .idea/inspectionProfiles/profiles_settings.xml +6 -0
  6. .idea/jupyter-settings.xml +19 -0
  7. .idea/misc.xml +18 -0
  8. .idea/modules.xml +8 -0
  9. .idea/workspace.xml +182 -0
  10. .ipynb_checkpoints/DataInspect-checkpoint.ipynb +396 -0
  11. .ipynb_checkpoints/Untitled-checkpoint.ipynb +6 -0
  12. .virtual_documents/DataInspect.ipynb +237 -0
  13. DataInspect.ipynb +622 -0
  14. README.md +43 -88
  15. code/QM9_dataset_class.py +51 -0
  16. code/lib/__pycache__/metrics.cpython-38.pyc +0 -0
  17. code/lib/metrics.py +95 -0
  18. code/lib/utils.py +397 -0
  19. code/model.py +90 -0
  20. code/prepare_QM9_dataset.py +48 -0
  21. code/run.py +94 -0
  22. model/epoch_0/embedding_0.dgl +0 -0
  23. model/epoch_0/embedding_1.dgl +0 -0
  24. model/epoch_0/embedding_10.dgl +0 -0
  25. model/epoch_0/embedding_100.dgl +0 -0
  26. model/epoch_0/embedding_1000.dgl +0 -0
  27. model/epoch_0/embedding_10000.dgl +0 -0
  28. model/epoch_0/embedding_100000.dgl +0 -0
  29. model/epoch_0/embedding_100001.dgl +0 -0
  30. model/epoch_0/embedding_100002.dgl +0 -0
  31. model/epoch_0/embedding_100003.dgl +0 -0
  32. model/epoch_0/embedding_100004.dgl +0 -0
  33. model/epoch_0/embedding_100005.dgl +0 -0
  34. model/epoch_0/embedding_100006.dgl +0 -0
  35. model/epoch_0/embedding_100007.dgl +0 -0
  36. model/epoch_0/embedding_100008.dgl +0 -0
  37. model/epoch_0/embedding_100009.dgl +0 -0
  38. model/epoch_0/embedding_10001.dgl +0 -0
  39. model/epoch_0/embedding_100010.dgl +0 -0
  40. model/epoch_0/embedding_100011.dgl +0 -0
  41. model/epoch_0/embedding_100012.dgl +0 -0
  42. model/epoch_0/embedding_100013.dgl +0 -0
  43. model/epoch_0/embedding_100014.dgl +0 -0
  44. model/epoch_0/embedding_100015.dgl +0 -0
  45. model/epoch_0/embedding_100016.dgl +0 -0
  46. model/epoch_0/embedding_100017.dgl +0 -0
  47. model/epoch_0/embedding_100018.dgl +0 -0
  48. model/epoch_0/embedding_100019.dgl +0 -0
  49. model/epoch_0/embedding_10002.dgl +0 -0
  50. model/epoch_0/embedding_100020.dgl +0 -0
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # 默认忽略的文件
2
+ /shelf/
3
+ /workspace.xml
4
+ # 基于编辑器的 HTTP 客户端请求
5
+ /httpRequests/
6
+ # Datasource local storage ignored files
7
+ /dataSources/
8
+ /dataSources.local.xml
.idea/GraphMAE.iml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$">
5
+ <excludeFolder url="file://$MODULE_DIR$/model" />
6
+ </content>
7
+ <orderEntry type="jdk" jdkName="gnn_course" jdkType="Python SDK" />
8
+ <orderEntry type="sourceFolder" forTests="false" />
9
+ </component>
10
+ </module>
.idea/deployment.xml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="PublishConfigData" remoteFilesAllowedToDisappearOnAutoupload="false">
4
+ <serverData>
5
+ <paths name="yifei@clear-antares.d2.comp.nus.edu.sg:22 password">
6
+ <serverdata>
7
+ <mappings>
8
+ <mapping local="$PROJECT_DIR$" web="/" />
9
+ </mappings>
10
+ </serverdata>
11
+ </paths>
12
+ </serverData>
13
+ </component>
14
+ </project>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <profile version="1.0">
3
+ <option name="myName" value="Project Default" />
4
+ <inspection_tool class="DuplicatedCode" enabled="true" level="WEAK WARNING" enabled_by_default="true">
5
+ <Languages>
6
+ <language minSize="337" name="Python" />
7
+ </Languages>
8
+ </inspection_tool>
9
+ <inspection_tool class="JupyterPackageInspection" enabled="false" level="WARNING" enabled_by_default="false" />
10
+ <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
11
+ <option name="ignoredPackages">
12
+ <value>
13
+ <list size="39">
14
+ <item index="0" class="java.lang.String" itemvalue="tqdm" />
15
+ <item index="1" class="java.lang.String" itemvalue="transformers" />
16
+ <item index="2" class="java.lang.String" itemvalue="python-Levenshtein" />
17
+ <item index="3" class="java.lang.String" itemvalue="torch" />
18
+ <item index="4" class="java.lang.String" itemvalue="numpy" />
19
+ <item index="5" class="java.lang.String" itemvalue="textworld" />
20
+ <item index="6" class="java.lang.String" itemvalue="lora" />
21
+ <item index="7" class="java.lang.String" itemvalue="matplotlib" />
22
+ <item index="8" class="java.lang.String" itemvalue="pickle" />
23
+ <item index="9" class="java.lang.String" itemvalue="fonttools" />
24
+ <item index="10" class="java.lang.String" itemvalue="numba" />
25
+ <item index="11" class="java.lang.String" itemvalue="scipy" />
26
+ <item index="12" class="java.lang.String" itemvalue="umap" />
27
+ <item index="13" class="java.lang.String" itemvalue="scikit-learn" />
28
+ <item index="14" class="java.lang.String" itemvalue="umap-learn" />
29
+ <item index="15" class="java.lang.String" itemvalue="typing_extensions" />
30
+ <item index="16" class="java.lang.String" itemvalue="kmapper" />
31
+ <item index="17" class="java.lang.String" itemvalue="llvmlite" />
32
+ <item index="18" class="java.lang.String" itemvalue="Pillow" />
33
+ <item index="19" class="java.lang.String" itemvalue="networkx" />
34
+ <item index="20" class="java.lang.String" itemvalue="joblib" />
35
+ <item index="21" class="java.lang.String" itemvalue="threadpoolctl" />
36
+ <item index="22" class="java.lang.String" itemvalue="python-dateutil" />
37
+ <item index="23" class="java.lang.String" itemvalue="kiwisolver" />
38
+ <item index="24" class="java.lang.String" itemvalue="packaging" />
39
+ <item index="25" class="java.lang.String" itemvalue="cycler" />
40
+ <item index="26" class="java.lang.String" itemvalue="pyparsing" />
41
+ <item index="27" class="java.lang.String" itemvalue="importlib-metadata" />
42
+ <item index="28" class="java.lang.String" itemvalue="Jinja2" />
43
+ <item index="29" class="java.lang.String" itemvalue="pandas" />
44
+ <item index="30" class="java.lang.String" itemvalue="nvidia-cuda-nvrtc-cu11" />
45
+ <item index="31" class="java.lang.String" itemvalue="nvidia-cudnn-cu11" />
46
+ <item index="32" class="java.lang.String" itemvalue="pynndescent" />
47
+ <item index="33" class="java.lang.String" itemvalue="Flask_Cors" />
48
+ <item index="34" class="java.lang.String" itemvalue="nvidia-cuda-runtime-cu11" />
49
+ <item index="35" class="java.lang.String" itemvalue="nvidia-cublas-cu11" />
50
+ <item index="36" class="java.lang.String" itemvalue="pymilvus" />
51
+ <item index="37" class="java.lang.String" itemvalue="Flask" />
52
+ <item index="38" class="java.lang.String" itemvalue="tensorflow" />
53
+ </list>
54
+ </value>
55
+ </option>
56
+ </inspection_tool>
57
+ <inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
58
+ <option name="ignoredErrors">
59
+ <list>
60
+ <option value="E501" />
61
+ <option value="E303" />
62
+ </list>
63
+ </option>
64
+ </inspection_tool>
65
+ <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
66
+ <option name="ignoredErrors">
67
+ <list>
68
+ <option value="N802" />
69
+ <option value="N806" />
70
+ <option value="N803" />
71
+ <option value="N801" />
72
+ </list>
73
+ </option>
74
+ </inspection_tool>
75
+ </profile>
76
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/jupyter-settings.xml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="JupyterPersistentConnectionParameters">
4
+ <option name="knownRemoteServers">
5
+ <list>
6
+ <JupyterConnectionParameters>
7
+ <option name="authType" value="notebook" />
8
+ <option name="token" value="daa5264f4116533de57d32113fc539be09b278969305d061" />
9
+ <option name="urlString" value="http://localhost:8888" />
10
+ <authParams2>
11
+ <map>
12
+ <entry key="token" value="daa5264f4116533de57d32113fc539be09b278969305d061" />
13
+ </map>
14
+ </authParams2>
15
+ </JupyterConnectionParameters>
16
+ </list>
17
+ </option>
18
+ </component>
19
+ </project>
.idea/misc.xml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="Black">
4
+ <option name="sdkName" value="gnn_course" />
5
+ </component>
6
+ <component name="ProjectRootManager" version="2" project-jdk-name="gnn_course" project-jdk-type="Python SDK" />
7
+ <component name="editorHistoryManager">
8
+ <entry file="jupyter-remote://clear-antares.d2.comp.nus.edu.sg:9898/hf-compund/utils/process_utils.py">
9
+ <provider selected="true" editor-type-id="text-editor" />
10
+ </entry>
11
+ <entry file="jupyter-remote://clear-antares.d2.comp.nus.edu.sg:9898/hf-compund/utils/style_utils.py">
12
+ <provider selected="true" editor-type-id="text-editor" />
13
+ </entry>
14
+ <entry file="jupyter-remote://clear-antares.d2.comp.nus.edu.sg:9898/hf-compund/utils/inject_utils.py">
15
+ <provider selected="true" editor-type-id="text-editor" />
16
+ </entry>
17
+ </component>
18
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/GraphMAE.iml" filepath="$PROJECT_DIR$/.idea/GraphMAE.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/workspace.xml ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="AutoImportSettings">
4
+ <option name="autoReloadType" value="SELECTIVE" />
5
+ </component>
6
+ <component name="ChangeListManager">
7
+ <list default="true" id="05c16913-db5a-4c55-afca-e224d7acad63" name="更改" comment="" />
8
+ <option name="SHOW_DIALOG" value="false" />
9
+ <option name="HIGHLIGHT_CONFLICTS" value="true" />
10
+ <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
11
+ <option name="LAST_RESOLUTION" value="IGNORE" />
12
+ </component>
13
+ <component name="FileTemplateManagerImpl">
14
+ <option name="RECENT_TEMPLATES">
15
+ <list>
16
+ <option value="Jupyter Notebook" />
17
+ <option value="Python Script" />
18
+ </list>
19
+ </option>
20
+ </component>
21
+ <component name="ProjectColorInfo">{
22
+ &quot;associatedIndex&quot;: 2
23
+ }</component>
24
+ <component name="ProjectId" id="2q4Rbo6tLkJeIeQpF4BEetO5SMe" />
25
+ <component name="ProjectViewState">
26
+ <option name="hideEmptyMiddlePackages" value="true" />
27
+ <option name="showLibraryContents" value="true" />
28
+ </component>
29
+ <component name="PropertiesComponent">{
30
+ &quot;keyToString&quot;: {
31
+ &quot;Python.prepare_QM9_dataset.executor&quot;: &quot;Run&quot;,
32
+ &quot;Python.run.executor&quot;: &quot;Run&quot;,
33
+ &quot;Python.upload_dataset.executor&quot;: &quot;Run&quot;,
34
+ &quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
35
+ &quot;last_opened_file_path&quot;: &quot;E:/python/2024.12.11/GraphMAE&quot;,
36
+ &quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
37
+ &quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
38
+ &quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
39
+ &quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
40
+ &quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
41
+ &quot;settings.editor.selected.configurable&quot;: &quot;org.jetbrains.plugins.notebooks.jupyter.connections.configuration.JupyterServerConfigurable&quot;,
42
+ &quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
43
+ }
44
+ }</component>
45
+ <component name="RecentsManager">
46
+ <key name="CopyFile.RECENT_KEYS">
47
+ <recent name="E:\python\2024.12.11\GraphMAE" />
48
+ <recent name="E:\python\2024.12.11\GraphMAE\dataset" />
49
+ </key>
50
+ </component>
51
+ <component name="RunAnythingCache">
52
+ <myKeys>
53
+ <visibility group="Grunt" flag="true" />
54
+ <visibility group="Gulp" flag="true" />
55
+ <visibility group="HTTP 请求" flag="true" />
56
+ <visibility group="Node.js" flag="true" />
57
+ <visibility group="npm" flag="true" />
58
+ <visibility group="yarn" flag="true" />
59
+ <visibility group="最近的项目" flag="true" />
60
+ <visibility group="运行 Python 文件" flag="true" />
61
+ <visibility group="运行 conda 命令" flag="true" />
62
+ <visibility group="运行 pip 命令" flag="true" />
63
+ <visibility group="运行配置" flag="true" />
64
+ </myKeys>
65
+ </component>
66
+ <component name="RunManager" selected="Python.upload_dataset">
67
+ <configuration name="prepare_QM9_dataset" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
68
+ <module name="GraphMAE" />
69
+ <option name="ENV_FILES" value="" />
70
+ <option name="INTERPRETER_OPTIONS" value="" />
71
+ <option name="PARENT_ENVS" value="true" />
72
+ <envs>
73
+ <env name="PYTHONUNBUFFERED" value="1" />
74
+ </envs>
75
+ <option name="SDK_HOME" value="" />
76
+ <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
77
+ <option name="IS_MODULE_SDK" value="true" />
78
+ <option name="ADD_CONTENT_ROOTS" value="true" />
79
+ <option name="ADD_SOURCE_ROOTS" value="true" />
80
+ <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
81
+ <option name="SCRIPT_NAME" value="$PROJECT_DIR$/prepare_QM9_dataset.py" />
82
+ <option name="PARAMETERS" value="--label_keys &quot;mu&quot; &quot;gap&quot;" />
83
+ <option name="SHOW_COMMAND_LINE" value="false" />
84
+ <option name="EMULATE_TERMINAL" value="false" />
85
+ <option name="MODULE_MODE" value="false" />
86
+ <option name="REDIRECT_INPUT" value="false" />
87
+ <option name="INPUT_FILE" value="" />
88
+ <method v="2" />
89
+ </configuration>
90
+ <configuration name="run" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
91
+ <module name="GraphMAE" />
92
+ <option name="ENV_FILES" value="" />
93
+ <option name="INTERPRETER_OPTIONS" value="" />
94
+ <option name="PARENT_ENVS" value="true" />
95
+ <envs>
96
+ <env name="PYTHONUNBUFFERED" value="1" />
97
+ </envs>
98
+ <option name="SDK_HOME" value="" />
99
+ <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
100
+ <option name="IS_MODULE_SDK" value="true" />
101
+ <option name="ADD_CONTENT_ROOTS" value="true" />
102
+ <option name="ADD_SOURCE_ROOTS" value="true" />
103
+ <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
104
+ <option name="SCRIPT_NAME" value="$PROJECT_DIR$/run.py" />
105
+ <option name="PARAMETERS" value="" />
106
+ <option name="SHOW_COMMAND_LINE" value="false" />
107
+ <option name="EMULATE_TERMINAL" value="false" />
108
+ <option name="MODULE_MODE" value="false" />
109
+ <option name="REDIRECT_INPUT" value="false" />
110
+ <option name="INPUT_FILE" value="" />
111
+ <method v="2" />
112
+ </configuration>
113
+ <configuration name="upload_dataset" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
114
+ <module name="GraphMAE" />
115
+ <option name="ENV_FILES" value="" />
116
+ <option name="INTERPRETER_OPTIONS" value="" />
117
+ <option name="PARENT_ENVS" value="true" />
118
+ <envs>
119
+ <env name="PYTHONUNBUFFERED" value="1" />
120
+ </envs>
121
+ <option name="SDK_HOME" value="" />
122
+ <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
123
+ <option name="IS_MODULE_SDK" value="true" />
124
+ <option name="ADD_CONTENT_ROOTS" value="true" />
125
+ <option name="ADD_SOURCE_ROOTS" value="true" />
126
+ <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
127
+ <option name="SCRIPT_NAME" value="$PROJECT_DIR$/upload_dataset.py" />
128
+ <option name="PARAMETERS" value="" />
129
+ <option name="SHOW_COMMAND_LINE" value="false" />
130
+ <option name="EMULATE_TERMINAL" value="false" />
131
+ <option name="MODULE_MODE" value="false" />
132
+ <option name="REDIRECT_INPUT" value="false" />
133
+ <option name="INPUT_FILE" value="" />
134
+ <method v="2" />
135
+ </configuration>
136
+ <list>
137
+ <item itemvalue="Python.prepare_QM9_dataset" />
138
+ <item itemvalue="Python.run" />
139
+ <item itemvalue="Python.upload_dataset" />
140
+ </list>
141
+ <recent_temporary>
142
+ <list>
143
+ <item itemvalue="Python.upload_dataset" />
144
+ <item itemvalue="Python.run" />
145
+ <item itemvalue="Python.prepare_QM9_dataset" />
146
+ </list>
147
+ </recent_temporary>
148
+ </component>
149
+ <component name="SharedIndexes">
150
+ <attachedChunks>
151
+ <set>
152
+ <option value="bundled-js-predefined-1d06a55b98c1-74d2a5396914-JavaScript-PY-241.14494.241" />
153
+ <option value="bundled-python-sdk-0509580d9d50-28c9f5db9ffe-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-241.14494.241" />
154
+ </set>
155
+ </attachedChunks>
156
+ </component>
157
+ <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
158
+ <component name="TaskManager">
159
+ <task active="true" id="Default" summary="默认任务">
160
+ <changelist id="05c16913-db5a-4c55-afca-e224d7acad63" name="更改" comment="" />
161
+ <created>1733919936165</created>
162
+ <option name="number" value="Default" />
163
+ <option name="presentableId" value="Default" />
164
+ <updated>1733919936165</updated>
165
+ <workItem from="1733919937766" duration="436000" />
166
+ <workItem from="1733920389980" duration="2907000" />
167
+ <workItem from="1733963667269" duration="27745000" />
168
+ <workItem from="1734055088372" duration="2434000" />
169
+ <workItem from="1734059757119" duration="24956000" />
170
+ <workItem from="1734143010460" duration="154000" />
171
+ </task>
172
+ <servers />
173
+ </component>
174
+ <component name="TypeScriptGeneratedFilesManager">
175
+ <option name="version" value="3" />
176
+ </component>
177
+ <component name="com.intellij.coverage.CoverageDataManagerImpl">
178
+ <SUITE FILE_PATH="coverage/GraphMAE$prepare_QM9_dataset.coverage" NAME="prepare_QM9_dataset 覆盖结果" MODIFIED="1734073819845" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
179
+ <SUITE FILE_PATH="coverage/GraphMAE$upload_dataset.coverage" NAME="upload_dataset 覆盖结果" MODIFIED="1734105927859" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
180
+ <SUITE FILE_PATH="coverage/GraphMAE$run.coverage" NAME="run 覆盖结果" MODIFIED="1734096435032" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
181
+ </component>
182
+ </project>
.ipynb_checkpoints/DataInspect-checkpoint.ipynb ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "metadata": {
5
+ "ExecuteTime": {
6
+ "end_time": "2024-12-12T16:11:02.453452Z",
7
+ "start_time": "2024-12-12T16:10:59.783010Z"
8
+ }
9
+ },
10
+ "cell_type": "code",
11
+ "source": [
12
+ "import os\n",
13
+ "import time\n",
14
+ "from rdkit import Chem\n",
15
+ "from rdkit import RDLogger;\n",
16
+ "from torch.utils.data import Dataset\n",
17
+ "import torch.nn.functional as F\n",
18
+ "RDLogger.DisableLog('rdApp.*')\n",
19
+ "import torch\n",
20
+ "import torch.nn as nn\n",
21
+ "import torch.optim as optim\n",
22
+ "import pickle\n",
23
+ "import numpy as np\n",
24
+ "import matplotlib.pyplot as plt\n",
25
+ "import math\n",
26
+ "import dgl\n",
27
+ "import networkx as nx"
28
+ ],
29
+ "id": "1517383df6eb646",
30
+ "outputs": [],
31
+ "execution_count": 1
32
+ },
33
+ {
34
+ "metadata": {
35
+ "ExecuteTime": {
36
+ "end_time": "2024-12-12T16:11:02.468893Z",
37
+ "start_time": "2024-12-12T16:11:02.454576Z"
38
+ }
39
+ },
40
+ "cell_type": "code",
41
+ "source": [
42
+ "atom_number_index_dict ={\n",
43
+ " 1:0,\n",
44
+ " 6:1,\n",
45
+ " 7:2,\n",
46
+ " 8:3,\n",
47
+ " 9:4\n",
48
+ "}\n",
49
+ "atom_index_number_dict ={\n",
50
+ " 0:1,\n",
51
+ " 1:6,\n",
52
+ " 2:7,\n",
53
+ " 3:8,\n",
54
+ " 4:9\n",
55
+ "}\n",
56
+ "def atom_number2index(atom_number):\n",
57
+ " return atom_number_index_dict[atom_number]\n",
58
+ "def atom_index2number(atom_index):\n",
59
+ " return atom_index_number_dict[atom_index]"
60
+ ],
61
+ "id": "697783252f244e50",
62
+ "outputs": [],
63
+ "execution_count": 2
64
+ },
65
+ {
66
+ "metadata": {
67
+ "ExecuteTime": {
68
+ "end_time": "2024-12-12T16:11:03.301916Z",
69
+ "start_time": "2024-12-12T16:11:03.243204Z"
70
+ }
71
+ },
72
+ "cell_type": "code",
73
+ "source": [
74
+ "from dgl.data import QM9Dataset\n",
75
+ "from torch.utils.data import SubsetRandomSampler\n",
76
+ "from dgl.dataloading import GraphDataLoader\n",
77
+ "\n",
78
+ "dataset = QM9Dataset(label_keys=['mu', 'gap'], cutoff=5.0)\n",
79
+ "dataset_length = len(dataset)\n",
80
+ "train_idx = torch.arange(dataset_length)\n",
81
+ "train_sampler = SubsetRandomSampler(train_idx)\n",
82
+ "def collate_fn(batch):\n",
83
+ " graphs, labels = map(list, zip(*batch))\n",
84
+ " for g in graphs:\n",
85
+ " # g.ndata[\"R\"]->the coordinates of each atom[num_nodes,3], g.ndata[\"Z\"]->the atomic number(H:1,C:6) [num_nodes]\n",
86
+ " g.ndata[\"Z_index\"] = torch.tensor([atom_number2index(z.item()) for z in g.ndata[\"Z\"]])\n",
87
+ " batched_graph = dgl.batch(graphs)\n",
88
+ " return batched_graph, torch.stack(labels)\n",
89
+ "myGLoader = GraphDataLoader(dataset,collate_fn=collate_fn,batch_size=5, pin_memory=True)"
90
+ ],
91
+ "id": "7074f5a11a15ebc6",
92
+ "outputs": [],
93
+ "execution_count": 3
94
+ },
95
+ {
96
+ "metadata": {
97
+ "ExecuteTime": {
98
+ "end_time": "2024-12-12T15:59:44.314049Z",
99
+ "start_time": "2024-12-12T15:59:44.299072Z"
100
+ }
101
+ },
102
+ "cell_type": "code",
103
+ "source": [
104
+ "# atom_numbers = []\n",
105
+ "# for g,_ in dataset:\n",
106
+ "# for atom_z in g.ndata[\"Z\"]:\n",
107
+ "# if atom_z not in atom_numbers:\n",
108
+ "# atom_numbers.append(atom_z)\n",
109
+ "# print(atom_numbers)"
110
+ ],
111
+ "id": "5758841a1f281514",
112
+ "outputs": [],
113
+ "execution_count": 12
114
+ },
115
+ {
116
+ "metadata": {
117
+ "ExecuteTime": {
118
+ "end_time": "2024-12-12T16:14:46.301537Z",
119
+ "start_time": "2024-12-12T16:11:12.606641Z"
120
+ }
121
+ },
122
+ "cell_type": "code",
123
+ "source": [
124
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
125
+ "for batch in myGLoader:\n",
126
+ " batch_g,label = batch\n",
127
+ " batch_g.to(device)\n",
128
+ " "
129
+ ],
130
+ "id": "13bcff8166b0e35b",
131
+ "outputs": [],
132
+ "execution_count": 5
133
+ },
134
+ {
135
+ "metadata": {
136
+ "ExecuteTime": {
137
+ "end_time": "2024-12-12T16:07:15.834856Z",
138
+ "start_time": "2024-12-12T16:07:15.822783Z"
139
+ }
140
+ },
141
+ "cell_type": "code",
142
+ "source": [
143
+ "from functools import partial\n",
144
+ "import sys\n",
145
+ "sys.path.append(\"lib\")\n",
146
+ "from lib.metrics import sce_loss\n",
147
+ "\n",
148
+ "class GMae(nn.Module):\n",
149
+ " def __init__(self, encoder,decoder,\n",
150
+ " in_dim,hidden_dim,out_dim,mask_rate=0.3,replace_rate=0.1,alpha_l=2,\n",
151
+ " embedding_layer_classes=4,embedding_layer_dim=4):\n",
152
+ " super(GMae, self).__init__()\n",
153
+ " self.Z_embedding = nn.Embedding(embedding_layer_classes,embedding_layer_dim)\n",
154
+ " self.encoder = encoder\n",
155
+ " self.decoder = decoder\n",
156
+ " self.mask_rate = mask_rate\n",
157
+ " self.replace_rate = replace_rate\n",
158
+ " self.alpha_l = alpha_l\n",
159
+ " self.in_dim = in_dim\n",
160
+ " self.hidden_dim = hidden_dim\n",
161
+ " self.out_dim = out_dim\n",
162
+ " self.embedding_layer_classes = embedding_layer_classes\n",
163
+ " self.embedding_layer_dim = embedding_layer_dim\n",
164
+ " self.enc_mask_token = nn.Parameter(torch.zeros(1,in_dim))\n",
165
+ " self.criterion = partial(sce_loss, alpha=alpha_l)\n",
166
+ " self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)\n",
167
+ " def encode_atom_index(self,Z_index):\n",
168
+ " return self.Z_embedding(Z_index)\n",
169
+ " def encoding_mask_noise(self, g, x, mask_rate=0.3):\n",
170
+ " num_nodes = g.num_nodes()\n",
171
+ " perm = torch.randperm(num_nodes, device=x.device)\n",
172
+ " # random masking\n",
173
+ " num_mask_nodes = int(mask_rate * num_nodes)\n",
174
+ " mask_nodes = perm[: num_mask_nodes]\n",
175
+ " keep_nodes = perm[num_mask_nodes: ]\n",
176
+ "\n",
177
+ " if self.replace_rate > 0:\n",
178
+ " num_noise_nodes = int(self.replace_rate * num_mask_nodes)\n",
179
+ " perm_mask = torch.randperm(num_mask_nodes, device=x.device)\n",
180
+ " token_nodes = mask_nodes[perm_mask[: int((1-self.replace_rate) * num_mask_nodes)]]\n",
181
+ " noise_nodes = mask_nodes[perm_mask[-int(self.replace_rate * num_mask_nodes):]]\n",
182
+ " noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]\n",
183
+ " out_x = x.clone()\n",
184
+ " out_x[token_nodes] = 0.0\n",
185
+ " out_x[noise_nodes] = x[noise_to_be_chosen]\n",
186
+ " else:\n",
187
+ " out_x = x.clone()\n",
188
+ " token_nodes = mask_nodes\n",
189
+ " out_x[mask_nodes] = 0.0\n",
190
+ "\n",
191
+ " out_x[token_nodes] += self.enc_mask_token\n",
192
+ " use_g = g.clone()\n",
193
+ "\n",
194
+ " return use_g, out_x, (mask_nodes, keep_nodes) \n",
195
+ " def mask_attr_prediction(self, g, x):\n",
196
+ " use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)\n",
197
+ " enc_rep = self.encoder(use_g, use_x)\n",
198
+ " # ---- attribute reconstruction ----\n",
199
+ " rep = self.encoder_to_decoder(enc_rep)\n",
200
+ " recon = self.decoder(use_g, rep)\n",
201
+ " x_init = x[mask_nodes]\n",
202
+ " x_rec = recon[mask_nodes]\n",
203
+ " loss = self.criterion(x_rec, x_init)\n",
204
+ " return loss\n",
205
+ "\n",
206
+ " def embed(self, g, x):\n",
207
+ " rep = self.encoder(g, x)\n",
208
+ " return rep\n",
209
+ " "
210
+ ],
211
+ "id": "1a5caea191a642bc",
212
+ "outputs": [],
213
+ "execution_count": 5
214
+ },
215
+ {
216
+ "metadata": {
217
+ "ExecuteTime": {
218
+ "end_time": "2024-12-12T16:07:18.136174Z",
219
+ "start_time": "2024-12-12T16:07:18.122456Z"
220
+ }
221
+ },
222
+ "cell_type": "code",
223
+ "source": [
224
+ "import dgl.nn as dglnn\n",
225
+ "import torch.nn as nn\n",
226
+ "import torch.nn.functional as F\n",
227
+ "class SimpleGNN(nn.Module):\n",
228
+ " def __init__(self, in_feats, hid_feats, out_feats):\n",
229
+ " super().__init__()\n",
230
+ " self.conv1 = dglnn.SAGEConv(\n",
231
+ " in_feats=in_feats, out_feats=hid_feats,aggregator_type=\"mean\")\n",
232
+ " self.conv2 = dglnn.SAGEConv(\n",
233
+ " in_feats=hid_feats, out_feats=out_feats,aggregator_type=\"mean\")\n",
234
+ "\n",
235
+ " def forward(self, graph, inputs):\n",
236
+ " # 输入是节点的特征\n",
237
+ " h = self.conv1(graph, inputs)\n",
238
+ " h = F.relu(h)\n",
239
+ " h = self.conv2(graph, h)\n",
240
+ " return h"
241
+ ],
242
+ "id": "c99cb509ac0f1054",
243
+ "outputs": [],
244
+ "execution_count": 6
245
+ },
246
+ {
247
+ "metadata": {
248
+ "ExecuteTime": {
249
+ "end_time": "2024-12-12T16:07:21.516476Z",
250
+ "start_time": "2024-12-12T16:07:21.118135Z"
251
+ }
252
+ },
253
+ "cell_type": "code",
254
+ "source": [
255
+ "sage_enc = SimpleGNN(in_feats=7,hid_feats=4,out_feats=4)\n",
256
+ "sage_dec = SimpleGNN(in_feats=4,hid_feats=4,out_feats=7)\n",
257
+ "gmae = GMae(sage_enc,sage_dec,7,4,7,replace_rate=0)\n",
258
+ "epoches = 20\n",
259
+ "optimizer = optim.Adam(gmae.parameters(), lr=1e-3)\n",
260
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
261
+ ],
262
+ "id": "5a8a4e4dd753b642",
263
+ "outputs": [],
264
+ "execution_count": 7
265
+ },
266
+ {
267
+ "metadata": {
268
+ "ExecuteTime": {
269
+ "end_time": "2024-12-12T16:10:52.354863Z",
270
+ "start_time": "2024-12-12T16:10:52.311090Z"
271
+ }
272
+ },
273
+ "cell_type": "code",
274
+ "source": [
275
+ "print(f\"epoch {0} started!\")\n",
276
+ "gmae.train()\n",
277
+ "gmae.encoder.train()\n",
278
+ "gmae.decoder.train()\n",
279
+ "gmae.to(device)\n",
280
+ "loss_epoch = 0\n",
281
+ "for batch in myGLoader:\n",
282
+ " optimizer.zero_grad()\n",
283
+ " batch_g, _ = batch\n",
284
+ " R = batch_g.ndata[\"R\"].to(device)\n",
285
+ " Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
286
+ " Z_emb = gmae.encode_atom_index(Z_index)\n",
287
+ " feat = torch.cat([R,Z_emb],dim=1)\n",
288
+ " batch_g = batch_g.to(device)\n",
289
+ " # loss = gmae.mask_attr_prediction(batch_g, feat)\n",
290
+ " # loss.backward()\n",
291
+ " # optimizer.step()\n",
292
+ " # loss_epoch+=loss.item()"
293
+ ],
294
+ "id": "224529a988b81ef5",
295
+ "outputs": [
296
+ {
297
+ "name": "stdout",
298
+ "output_type": "stream",
299
+ "text": [
300
+ "epoch 0 started!\n"
301
+ ]
302
+ },
303
+ {
304
+ "ename": "RuntimeError",
305
+ "evalue": "CUDA error: device-side assert triggered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
306
+ "output_type": "error",
307
+ "traceback": [
308
+ "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
309
+ "\u001B[1;31mRuntimeError\u001B[0m Traceback (most recent call last)",
310
+ "Cell \u001B[1;32mIn[12], line 10\u001B[0m\n\u001B[0;32m 8\u001B[0m optimizer\u001B[38;5;241m.\u001B[39mzero_grad()\n\u001B[0;32m 9\u001B[0m batch_g, _ \u001B[38;5;241m=\u001B[39m batch\n\u001B[1;32m---> 10\u001B[0m R \u001B[38;5;241m=\u001B[39m \u001B[43mbatch_g\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mndata\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mR\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m]\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mto\u001B[49m\u001B[43m(\u001B[49m\u001B[43mdevice\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m 11\u001B[0m Z_index \u001B[38;5;241m=\u001B[39m batch_g\u001B[38;5;241m.\u001B[39mndata[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mZ_index\u001B[39m\u001B[38;5;124m\"\u001B[39m]\u001B[38;5;241m.\u001B[39mto(device)\n\u001B[0;32m 12\u001B[0m Z_emb \u001B[38;5;241m=\u001B[39m gmae\u001B[38;5;241m.\u001B[39mencode_atom_index(Z_index)\n",
311
+ "\u001B[1;31mRuntimeError\u001B[0m: CUDA error: device-side assert triggered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n"
312
+ ]
313
+ }
314
+ ],
315
+ "execution_count": 12
316
+ },
317
+ {
318
+ "metadata": {
319
+ "ExecuteTime": {
320
+ "end_time": "2024-12-12T15:59:46.502050Z",
321
+ "start_time": "2024-12-12T15:59:44.442506Z"
322
+ }
323
+ },
324
+ "cell_type": "code",
325
+ "source": [
326
+ "from datetime import datetime\n",
327
+ "\n",
328
+ "current_time = datetime.now().strftime(\"%m-%d@%H_%M\")\n",
329
+ "best_loss = 10000\n",
330
+ "for epoch in range(epoches):\n",
331
+ " print(f\"epoch {epoch} started!\")\n",
332
+ " gmae.train()\n",
333
+ " gmae.encoder.train()\n",
334
+ " gmae.decoder.train()\n",
335
+ " gmae.to(device)\n",
336
+ " loss_epoch = 0\n",
337
+ " for batch in myGLoader:\n",
338
+ " optimizer.zero_grad()\n",
339
+ " batch_g, _ = batch\n",
340
+ " R = batch_g.ndata[\"R\"].to(device)\n",
341
+ " Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
342
+ " Z_emb = gmae.encode_atom_index(Z_index)\n",
343
+ " feat = torch.cat([R,Z_emb],dim=1)\n",
344
+ " batch_g = batch_g.to(device)\n",
345
+ " loss = gmae.mask_attr_prediction(batch_g, feat)\n",
346
+ " loss.backward()\n",
347
+ " optimizer.step()\n",
348
+ " loss_epoch+=loss.item()\n",
349
+ " if loss_epoch < best_loss:\n",
350
+ " formatted_loss_epoch = f\"{loss_epoch:.3f}\"\n",
351
+ " save_path = f\"./experiments/consumption/gmae/{current_time}/gmae_epoch-{epoch}-{formatted_loss_epoch}.pt\"\n",
352
+ " save_dir = os.path.dirname(save_path)\n",
353
+ " if not os.path.exists(save_dir):\n",
354
+ " os.makedirs(save_dir,exist_ok=True)\n",
355
+ " torch.save(gmae.state_dict(), save_path)\n",
356
+ " best_loss = loss_epoch\n",
357
+ " print(f\"best model saved-loss:{formatted_loss_epoch}-save_path:{save_path}\")\n",
358
+ " print(f\"epoch {epoch}: loss {loss_epoch}\")"
359
+ ],
360
+ "id": "a22599c4e591125b",
361
+ "outputs": [
362
+ {
363
+ "name": "stdout",
364
+ "output_type": "stream",
365
+ "text": [
366
+ "epoch 0 started!\n"
367
+ ]
368
+ },
369
+ {
370
+ "ename": "RuntimeError",
371
+ "evalue": "CUDA error: device-side assert triggered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
372
+ "output_type": "error",
373
+ "traceback": [
374
+ "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
375
+ "\u001B[1;31mRuntimeError\u001B[0m Traceback (most recent call last)",
376
+ "Cell \u001B[1;32mIn[18], line 19\u001B[0m\n\u001B[0;32m 17\u001B[0m Z_emb \u001B[38;5;241m=\u001B[39m gmae\u001B[38;5;241m.\u001B[39mencode_atom_index(Z_index)\n\u001B[0;32m 18\u001B[0m feat \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mcat([R,Z_emb],dim\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m)\n\u001B[1;32m---> 19\u001B[0m batch_g \u001B[38;5;241m=\u001B[39m \u001B[43mbatch_g\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mto\u001B[49m\u001B[43m(\u001B[49m\u001B[43mdevice\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m 20\u001B[0m loss \u001B[38;5;241m=\u001B[39m gmae\u001B[38;5;241m.\u001B[39mmask_attr_prediction(batch_g, feat)\n\u001B[0;32m 21\u001B[0m loss\u001B[38;5;241m.\u001B[39mbackward()\n",
377
+ "File \u001B[1;32mE:\\Anaconda\\envs\\gnn_course\\lib\\site-packages\\dgl\\heterograph.py:5730\u001B[0m, in \u001B[0;36mDGLGraph.to\u001B[1;34m(self, device, **kwargs)\u001B[0m\n\u001B[0;32m 5728\u001B[0m \u001B[38;5;66;03m# 2. Copy misc info\u001B[39;00m\n\u001B[0;32m 5729\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_batch_num_nodes \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[1;32m-> 5730\u001B[0m new_bnn \u001B[38;5;241m=\u001B[39m {\n\u001B[0;32m 5731\u001B[0m k: F\u001B[38;5;241m.\u001B[39mcopy_to(num, device, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n\u001B[0;32m 5732\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m k, num \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_batch_num_nodes\u001B[38;5;241m.\u001B[39mitems()\n\u001B[0;32m 5733\u001B[0m }\n\u001B[0;32m 5734\u001B[0m ret\u001B[38;5;241m.\u001B[39m_batch_num_nodes \u001B[38;5;241m=\u001B[39m new_bnn\n\u001B[0;32m 5735\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_batch_num_edges \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n",
378
+ "File \u001B[1;32mE:\\Anaconda\\envs\\gnn_course\\lib\\site-packages\\dgl\\heterograph.py:5731\u001B[0m, in \u001B[0;36m<dictcomp>\u001B[1;34m(.0)\u001B[0m\n\u001B[0;32m 5728\u001B[0m \u001B[38;5;66;03m# 2. Copy misc info\u001B[39;00m\n\u001B[0;32m 5729\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_batch_num_nodes \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[0;32m 5730\u001B[0m new_bnn \u001B[38;5;241m=\u001B[39m {\n\u001B[1;32m-> 5731\u001B[0m k: \u001B[43mF\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcopy_to\u001B[49m\u001B[43m(\u001B[49m\u001B[43mnum\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdevice\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m 5732\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m k, num \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_batch_num_nodes\u001B[38;5;241m.\u001B[39mitems()\n\u001B[0;32m 5733\u001B[0m }\n\u001B[0;32m 5734\u001B[0m ret\u001B[38;5;241m.\u001B[39m_batch_num_nodes \u001B[38;5;241m=\u001B[39m new_bnn\n\u001B[0;32m 5735\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_batch_num_edges \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n",
379
+ "File \u001B[1;32mE:\\Anaconda\\envs\\gnn_course\\lib\\site-packages\\dgl\\backend\\pytorch\\tensor.py:143\u001B[0m, in \u001B[0;36mcopy_to\u001B[1;34m(input, ctx, **kwargs)\u001B[0m\n\u001B[0;32m 141\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m ctx\u001B[38;5;241m.\u001B[39mindex \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[0;32m 142\u001B[0m th\u001B[38;5;241m.\u001B[39mcuda\u001B[38;5;241m.\u001B[39mset_device(ctx\u001B[38;5;241m.\u001B[39mindex)\n\u001B[1;32m--> 143\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43minput\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcuda\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m 144\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m 145\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mRuntimeError\u001B[39;00m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mInvalid context\u001B[39m\u001B[38;5;124m\"\u001B[39m, ctx)\n",
380
+ "\u001B[1;31mRuntimeError\u001B[0m: CUDA error: device-side assert triggered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n"
381
+ ]
382
+ }
383
+ ],
384
+ "execution_count": 18
385
+ }
386
+ ],
387
+ "metadata": {
388
+ "kernelspec": {
389
+ "name": "gnn_course",
390
+ "language": "python",
391
+ "display_name": "gnn_course"
392
+ }
393
+ },
394
+ "nbformat": 4,
395
+ "nbformat_minor": 5
396
+ }
.ipynb_checkpoints/Untitled-checkpoint.ipynb ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [],
3
+ "metadata": {},
4
+ "nbformat": 4,
5
+ "nbformat_minor": 5
6
+ }
.virtual_documents/DataInspect.ipynb ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ from rdkit import Chem
4
+ from rdkit import RDLogger;
5
+ from torch.utils.data import Dataset
6
+ import torch.nn.functional as F
7
+ from tqdm import tqdm
8
+ RDLogger.DisableLog('rdApp.*')
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.optim as optim
12
+ import pickle
13
+ import numpy as np
14
+ import matplotlib.pyplot as plt
15
+ import math
16
+ import dgl
17
+ import networkx as nx
18
+
19
+
20
+ atom_number_index_dict ={
21
+ 1:0, # H
22
+ 6:1, # C
23
+ 7:2, # N
24
+ 8:3, # O
25
+ 9:4 # F
26
+ }
27
+ # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
28
+ atom_index_number_dict = {v: k for k, v in atom_number_index_dict.items()}
29
+ max_atom_number = max(atom_number_index_dict.keys())
30
+ atom_number2index_tensor = torch.full((max_atom_number + 1,), -1)
31
+ for k, v in atom_number_index_dict.items():
32
+ atom_number2index_tensor[k] = v
33
+
34
+ atom_index2number_tensor = torch.tensor([atom_index_number_dict[i] for i in range(len(atom_index_number_dict))])
35
+ def atom_number2index(atom_numbers):
36
+ return atom_number2index_tensor[atom_numbers]
37
+ def atom_index2number(atom_indexes):
38
+ return atom_index2number_tensor[atom_indexes]
39
+
40
+
41
+ from dgl.data import QM9Dataset
42
+ from torch.utils.data import SubsetRandomSampler
43
+ from dgl.dataloading import GraphDataLoader
44
+ from multiprocessing import Pool
45
+
46
+ dataset = QM9Dataset(label_keys=['mu', 'gap'], cutoff=5.0)
47
+ dataset_length = len(dataset)
48
+ train_idx = torch.arange(dataset_length)
49
+ # def preprocess_graph(data):
50
+ # g, label = data
51
+ # g.ndata["Z_index"] = atom_number2index(g.ndata["Z"])
52
+ # return g, label
53
+
54
+ # def preprocess_dataset(dataset):
55
+ # with Pool(processes=4) as pool: # 设置进程数
56
+ # with tqdm(total=len(dataset)) as pbar: # 初始化进度条
57
+ # results = []
58
+ # for result in pool.imap(preprocess_graph, dataset): # 使用 imap 逐步处理
59
+ # results.append(result)
60
+ # pbar.update(1) # 更新进度条
61
+ # return results
62
+
63
+ # # 使用多进程预处理数据集
64
+ # dataset = preprocess_dataset(dataset)
65
+
66
+ # def collate_fn(batch):
67
+ # # print(batch)
68
+ # graphs, labels = map(list, zip(*batch))
69
+ # for g in graphs:
70
+ # pass
71
+ # # g.ndata["Z_index"] = atom_number2index(g.ndata["Z"])
72
+ # # g.ndata["R"]->the coordinates of each atom[num_nodes,3], g.ndata["Z"]->the atomic number(H:1,C:6) [num_nodes]
73
+ # # g.ndata["Z_index"] = torch.tensor([atom_number2index(z.item()) for z in g.ndata["Z"]])
74
+ # batched_graph = dgl.batch(graphs)
75
+ # return batched_graph, torch.stack(labels)
76
+ myGLoader = GraphDataLoader(dataset,batch_size=32,pin_memory=True,num_workers=8)
77
+
78
+
79
+ # for batch in tqdm(myGLoader):
80
+ # pass
81
+ # # print(batch)
82
+
83
+
84
+
85
+ from functools import partial
86
+ import sys
87
+ sys.path.append("lib")
88
+ from lib.metrics import sce_loss
89
+
90
+ class GMae(nn.Module):
91
+ def __init__(self, encoder,decoder,
92
+ in_dim,hidden_dim,out_dim,mask_rate=0.3,replace_rate=0.1,alpha_l=2,
93
+ embedding_layer_classes=5,embedding_layer_dim=4):
94
+ super(GMae, self).__init__()
95
+ self.Z_embedding = nn.Embedding(embedding_layer_classes,embedding_layer_dim)
96
+ self.encoder = encoder
97
+ self.decoder = decoder
98
+ self.mask_rate = mask_rate
99
+ self.replace_rate = replace_rate
100
+ self.alpha_l = alpha_l
101
+ self.in_dim = in_dim
102
+ self.hidden_dim = hidden_dim
103
+ self.out_dim = out_dim
104
+ self.embedding_layer_classes = embedding_layer_classes
105
+ self.embedding_layer_dim = embedding_layer_dim
106
+ self.enc_mask_token = nn.Parameter(torch.zeros(1,in_dim))
107
+ self.criterion = partial(sce_loss, alpha=alpha_l)
108
+ self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)
109
+ def encode_atom_index(self,Z_index):
110
+ return self.Z_embedding(Z_index)
111
+ def encoding_mask_noise(self, g, x, mask_rate=0.3):
112
+ num_nodes = g.num_nodes()
113
+ perm = torch.randperm(num_nodes, device=x.device)
114
+ # random masking
115
+ num_mask_nodes = int(mask_rate * num_nodes)
116
+ mask_nodes = perm[: num_mask_nodes]
117
+ keep_nodes = perm[num_mask_nodes: ]
118
+
119
+ if self.replace_rate > 0:
120
+ num_noise_nodes = int(self.replace_rate * num_mask_nodes)
121
+ perm_mask = torch.randperm(num_mask_nodes, device=x.device)
122
+ token_nodes = mask_nodes[perm_mask[: int((1-self.replace_rate) * num_mask_nodes)]]
123
+ noise_nodes = mask_nodes[perm_mask[-int(self.replace_rate * num_mask_nodes):]]
124
+ noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]
125
+ out_x = x.clone()
126
+ out_x[token_nodes] = 0.0
127
+ out_x[noise_nodes] = x[noise_to_be_chosen]
128
+ else:
129
+ out_x = x.clone()
130
+ token_nodes = mask_nodes
131
+ out_x[mask_nodes] = 0.0
132
+
133
+ out_x[token_nodes] += self.enc_mask_token
134
+ use_g = g.clone()
135
+
136
+ return use_g, out_x, (mask_nodes, keep_nodes)
137
+ def mask_attr_prediction(self, g, x):
138
+ use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)
139
+ enc_rep = self.encoder(use_g, use_x)
140
+ # ---- attribute reconstruction ----
141
+ rep = self.encoder_to_decoder(enc_rep)
142
+ recon = self.decoder(use_g, rep)
143
+ x_init = x[mask_nodes]
144
+ x_rec = recon[mask_nodes]
145
+ loss = self.criterion(x_rec, x_init)
146
+ return loss
147
+
148
+ def embed(self, g, x):
149
+ rep = self.encoder(g, x)
150
+ return rep
151
+
152
+
153
+
154
+ import dgl.nn as dglnn
155
+ import torch.nn as nn
156
+ import torch.nn.functional as F
157
+ class SimpleGNN(nn.Module):
158
+ def __init__(self, in_feats, hid_feats, out_feats):
159
+ super().__init__()
160
+ self.conv1 = dglnn.SAGEConv(
161
+ in_feats=in_feats, out_feats=hid_feats,aggregator_type="mean")
162
+ self.conv2 = dglnn.SAGEConv(
163
+ in_feats=hid_feats, out_feats=out_feats,aggregator_type="mean")
164
+
165
+ def forward(self, graph, inputs):
166
+ # 输入是节点的特征
167
+ h = self.conv1(graph, inputs)
168
+ h = F.relu(h)
169
+ h = self.conv2(graph, h)
170
+ return h
171
+
172
+
173
+ sage_enc = SimpleGNN(in_feats=7,hid_feats=4,out_feats=4)
174
+ sage_dec = SimpleGNN(in_feats=4,hid_feats=4,out_feats=7)
175
+ gmae = GMae(sage_enc,sage_dec,7,4,7,replace_rate=0)
176
+ epoches = 20
177
+ optimizer = optim.Adam(gmae.parameters(), lr=1e-3)
178
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
179
+
180
+
181
+ print(f"epoch {0} started!")
182
+ gmae.train()
183
+ gmae.encoder.train()
184
+ gmae.decoder.train()
185
+ gmae.to(device)
186
+ loss_epoch = 0
187
+ import os
188
+ os.environ["CUDA_LAUNCH_BLOCKING"]="1"
189
+ for batch in tqdm(myGLoader):
190
+ optimizer.zero_grad()
191
+ batch_g, _ = batch
192
+ R = batch_g.ndata["R"].to(device)
193
+ Z_index = batch_g.ndata["Z"].to(device)
194
+ Z_emb = gmae.encode_atom_index(Z_index)
195
+ # feat = torch.cat([R,Z_emb],dim=1)
196
+ # batch_g = batch_g.to(device)
197
+ # loss = gmae.mask_attr_prediction(batch_g, feat)
198
+ # loss.backward()
199
+ # optimizer.step()
200
+ # loss_epoch+=loss.item()
201
+
202
+
203
+
204
+ from datetime import datetime
205
+
206
+ current_time = datetime.now().strftime("%m-%d@%H_%M")
207
+ best_loss = 10000
208
+ for epoch in range(epoches):
209
+ print(f"epoch {epoch} started!")
210
+ gmae.train()
211
+ gmae.encoder.train()
212
+ gmae.decoder.train()
213
+ gmae.to(device)
214
+ loss_epoch = 0
215
+ for batch in myGLoader:
216
+ optimizer.zero_grad()
217
+ batch_g, _ = batch
218
+ R = batch_g.ndata["R"].to(device)
219
+ # Z_index = batch_g.ndata["Z_index"].to(device)
220
+ Z_index = batch_g.ndata["Z_index"].to(device)
221
+ Z_emb = gmae.encode_atom_index(Z_index)
222
+ feat = torch.cat([R,Z_emb],dim=1)
223
+ batch_g = batch_g.to(device)
224
+ loss = gmae.mask_attr_prediction(batch_g, feat)
225
+ loss.backward()
226
+ optimizer.step()
227
+ loss_epoch+=loss.item()
228
+ if loss_epoch < best_loss:
229
+ formatted_loss_epoch = f"{loss_epoch:.3f}"
230
+ save_path = f"./experiments/consumption/gmae/{current_time}/gmae_epoch-{epoch}-{formatted_loss_epoch}.pt"
231
+ save_dir = os.path.dirname(save_path)
232
+ if not os.path.exists(save_dir):
233
+ os.makedirs(save_dir,exist_ok=True)
234
+ torch.save(gmae.state_dict(), save_path)
235
+ best_loss = loss_epoch
236
+ print(f"best model saved-loss:{formatted_loss_epoch}-save_path:{save_path}")
237
+ print(f"epoch {epoch}: loss {loss_epoch}")
DataInspect.ipynb ADDED
@@ -0,0 +1,622 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 2,
6
+ "id": "1517383df6eb646",
7
+ "metadata": {
8
+ "ExecuteTime": {
9
+ "end_time": "2024-12-13T13:13:56.347478Z",
10
+ "start_time": "2024-12-13T13:13:52.210350Z"
11
+ }
12
+ },
13
+ "outputs": [],
14
+ "source": [
15
+ "import os\n",
16
+ "import time\n",
17
+ "from rdkit import Chem\n",
18
+ "from rdkit import RDLogger;\n",
19
+ "from torch.utils.data import Dataset\n",
20
+ "import torch.nn.functional as F\n",
21
+ "from tqdm import tqdm\n",
22
+ "RDLogger.DisableLog('rdApp.*')\n",
23
+ "import torch\n",
24
+ "import torch.nn as nn\n",
25
+ "import torch.optim as optim\n",
26
+ "import pickle\n",
27
+ "import numpy as np\n",
28
+ "import matplotlib.pyplot as plt\n",
29
+ "import math\n",
30
+ "import dgl\n",
31
+ "import networkx as nx"
32
+ ]
33
+ },
34
+ {
35
+ "cell_type": "code",
36
+ "execution_count": 3,
37
+ "id": "697783252f244e50",
38
+ "metadata": {
39
+ "ExecuteTime": {
40
+ "end_time": "2024-12-13T04:02:54.040212Z",
41
+ "start_time": "2024-12-13T04:02:54.034215Z"
42
+ }
43
+ },
44
+ "outputs": [],
45
+ "source": [
46
+ "atom_number_index_dict ={\n",
47
+ " 1:0, # H\n",
48
+ " 6:1, # C\n",
49
+ " 7:2, # N\n",
50
+ " 8:3, # O\n",
51
+ " 9:4 # F\n",
52
+ "} \n",
53
+ "# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "atom_index_number_dict = {v: k for k, v in atom_number_index_dict.items()}\n",
55
+ "max_atom_number = max(atom_number_index_dict.keys())\n",
56
+ "atom_number2index_tensor = torch.full((max_atom_number + 1,), -1)\n",
57
+ "for k, v in atom_number_index_dict.items():\n",
58
+ " atom_number2index_tensor[k] = v\n",
59
+ "\n",
60
+ "atom_index2number_tensor = torch.tensor([atom_index_number_dict[i] for i in range(len(atom_index_number_dict))])\n",
61
+ "def atom_number2index(atom_number):\n",
62
+ " return atom_number_index_dict[atom_number]\n",
63
+ "def atom_index2number(atom_index):\n",
64
+ " return atom_index_number_dict[atom_index]"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": 4,
70
+ "id": "7074f5a11a15ebc6",
71
+ "metadata": {
72
+ "ExecuteTime": {
73
+ "end_time": "2024-12-13T04:05:20.426859Z",
74
+ "start_time": "2024-12-13T04:02:57.613812Z"
75
+ }
76
+ },
77
+ "outputs": [
78
+ {
79
+ "name": "stderr",
80
+ "output_type": "stream",
81
+ "text": [
82
+ "100%|██████████| 130831/130831 [02:22<00:00, 916.44it/s] \n"
83
+ ]
84
+ }
85
+ ],
86
+ "source": [
87
+ "from dgl.data import QM9Dataset\n",
88
+ "from torch.utils.data import SubsetRandomSampler\n",
89
+ "from dgl.dataloading import GraphDataLoader\n",
90
+ "from multiprocessing import Pool\n",
91
+ "\n",
92
+ "dataset = QM9Dataset(label_keys=['mu', 'gap'], cutoff=5.0)\n",
93
+ "dataset_length = len(dataset)\n",
94
+ "train_idx = torch.arange(dataset_length)\n",
95
+ "class PreprocessedQM9Dataset(Dataset):\n",
96
+ " def __init__(self, dataset):\n",
97
+ " self.dataset = dataset\n",
98
+ " self.processed_data = []\n",
99
+ " self._preprocess()\n",
100
+ "\n",
101
+ " def _preprocess(self):\n",
102
+ " for g, label in tqdm(self.dataset):\n",
103
+ " g.ndata[\"Z_index\"] = torch.tensor([atom_number2index(z.item()) for z in g.ndata[\"Z\"]])\n",
104
+ " self.processed_data.append((g, label))\n",
105
+ "\n",
106
+ " def __len__(self):\n",
107
+ " return len(self.processed_data)\n",
108
+ "\n",
109
+ " def __getitem__(self, idx):\n",
110
+ " return self.processed_data[idx]\n",
111
+ "\n",
112
+ "# 包装数据集\n",
113
+ "processed_dataset = PreprocessedQM9Dataset(dataset)"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": 1,
119
+ "id": "d1f69b7e2e1aa945",
120
+ "metadata": {
121
+ "ExecuteTime": {
122
+ "end_time": "2024-12-13T03:55:50.314260Z",
123
+ "start_time": "2024-12-13T03:55:50.115978Z"
124
+ }
125
+ },
126
+ "outputs": [
127
+ {
128
+ "ename": "NameError",
129
+ "evalue": "name 'processed_dataset' is not defined",
130
+ "output_type": "error",
131
+ "traceback": [
132
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
133
+ "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
134
+ "Cell \u001b[1;32mIn[1], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mprocessed_dataset\u001b[49m[\u001b[38;5;241m0\u001b[39m])\n",
135
+ "\u001b[1;31mNameError\u001b[0m: name 'processed_dataset' is not defined"
136
+ ]
137
+ }
138
+ ],
139
+ "source": [
140
+ "print(processed_dataset[0])"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": 5,
146
+ "id": "d1137deeda269919",
147
+ "metadata": {
148
+ "ExecuteTime": {
149
+ "end_time": "2024-12-13T04:05:20.442135Z",
150
+ "start_time": "2024-12-13T04:05:20.428230Z"
151
+ }
152
+ },
153
+ "outputs": [],
154
+ "source": [
155
+ "myGLoader = GraphDataLoader(processed_dataset,batch_size=4,pin_memory=True)"
156
+ ]
157
+ },
158
+ {
159
+ "cell_type": "code",
160
+ "execution_count": 13,
161
+ "id": "b44c553b-dc97-445c-b50f-5d8cc58e12c3",
162
+ "metadata": {
163
+ "ExecuteTime": {
164
+ "end_time": "2024-12-13T12:20:46.508536Z",
165
+ "start_time": "2024-12-13T12:20:20.023147Z"
166
+ }
167
+ },
168
+ "outputs": [
169
+ {
170
+ "name": "stderr",
171
+ "output_type": "stream",
172
+ "text": [
173
+ " 0%| | 138/32708 [00:00<00:23, 1368.45it/s]"
174
+ ]
175
+ },
176
+ {
177
+ "name": "stdout",
178
+ "output_type": "stream",
179
+ "text": [
180
+ "16\n",
181
+ "21\n",
182
+ "26\n",
183
+ "38\n",
184
+ "42\n",
185
+ "44\n",
186
+ "49\n",
187
+ "50\n",
188
+ "58\n",
189
+ "72\n",
190
+ "80\n",
191
+ "82\n"
192
+ ]
193
+ },
194
+ {
195
+ "name": "stderr",
196
+ "output_type": "stream",
197
+ "text": [
198
+ " 2%|▏ | 546/32708 [00:00<00:24, 1307.34it/s]"
199
+ ]
200
+ },
201
+ {
202
+ "name": "stdout",
203
+ "output_type": "stream",
204
+ "text": [
205
+ "84\n",
206
+ "86\n"
207
+ ]
208
+ },
209
+ {
210
+ "name": "stderr",
211
+ "output_type": "stream",
212
+ "text": [
213
+ " 5%|▍ | 1633/32708 [00:01<00:23, 1311.41it/s]"
214
+ ]
215
+ },
216
+ {
217
+ "name": "stdout",
218
+ "output_type": "stream",
219
+ "text": [
220
+ "94\n",
221
+ "96\n"
222
+ ]
223
+ },
224
+ {
225
+ "name": "stderr",
226
+ "output_type": "stream",
227
+ "text": [
228
+ " 43%|████▎ | 14160/32708 [00:10<00:15, 1224.36it/s]"
229
+ ]
230
+ },
231
+ {
232
+ "name": "stdout",
233
+ "output_type": "stream",
234
+ "text": [
235
+ "98\n",
236
+ "100\n",
237
+ "106\n"
238
+ ]
239
+ },
240
+ {
241
+ "name": "stderr",
242
+ "output_type": "stream",
243
+ "text": [
244
+ " 46%|████▌ | 14903/32708 [00:11<00:14, 1228.66it/s]"
245
+ ]
246
+ },
247
+ {
248
+ "name": "stdout",
249
+ "output_type": "stream",
250
+ "text": [
251
+ "110\n"
252
+ ]
253
+ },
254
+ {
255
+ "name": "stderr",
256
+ "output_type": "stream",
257
+ "text": [
258
+ "100%|██████████| 32708/32708 [00:26<00:00, 1235.31it/s]\n"
259
+ ]
260
+ }
261
+ ],
262
+ "source": [
263
+ "max_nodes = 0\n",
264
+ "for batch in tqdm(myGLoader):\n",
265
+ " g,label = batch\n",
266
+ " if g.num_nodes()>max_nodes:\n",
267
+ " max_nodes = g.num_nodes()\n",
268
+ " print(g.num_nodes())\n",
269
+ " # print(g)\n",
270
+ " # break\n",
271
+ " "
272
+ ]
273
+ },
274
+ {
275
+ "cell_type": "code",
276
+ "execution_count": 6,
277
+ "id": "1a5caea191a642bc",
278
+ "metadata": {
279
+ "ExecuteTime": {
280
+ "end_time": "2024-12-13T04:05:20.457355Z",
281
+ "start_time": "2024-12-13T04:05:20.443241Z"
282
+ }
283
+ },
284
+ "outputs": [],
285
+ "source": [
286
+ "from functools import partial\n",
287
+ "import sys\n",
288
+ "sys.path.append(\"lib\")\n",
289
+ "from lib.metrics import sce_loss\n",
290
+ "\n",
291
+ "class GMae(nn.Module):\n",
292
+ " def __init__(self, encoder,decoder,\n",
293
+ " in_dim,hidden_dim,out_dim,mask_rate=0.3,replace_rate=0.1,alpha_l=2,\n",
294
+ " embedding_layer_classes=5,embedding_layer_dim=4):\n",
295
+ " super(GMae, self).__init__()\n",
296
+ " self.Z_embedding = nn.Embedding(embedding_layer_classes,embedding_layer_dim)\n",
297
+ " self.encoder = encoder\n",
298
+ " self.decoder = decoder\n",
299
+ " self.mask_rate = mask_rate\n",
300
+ " self.replace_rate = replace_rate\n",
301
+ " self.alpha_l = alpha_l\n",
302
+ " self.in_dim = in_dim\n",
303
+ " self.hidden_dim = hidden_dim\n",
304
+ " self.out_dim = out_dim\n",
305
+ " self.embedding_layer_classes = embedding_layer_classes\n",
306
+ " self.embedding_layer_dim = embedding_layer_dim\n",
307
+ " self.enc_mask_token = nn.Parameter(torch.zeros(1,in_dim))\n",
308
+ " self.criterion = partial(sce_loss, alpha=alpha_l)\n",
309
+ " self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)\n",
310
+ " def encode_atom_index(self,Z_index):\n",
311
+ " return self.Z_embedding(Z_index)\n",
312
+ " def encoding_mask_noise(self, g, x, mask_rate=0.3):\n",
313
+ " num_nodes = g.num_nodes()\n",
314
+ " perm = torch.randperm(num_nodes, device=x.device)\n",
315
+ " # random masking\n",
316
+ " num_mask_nodes = int(mask_rate * num_nodes)\n",
317
+ " mask_nodes = perm[: num_mask_nodes]\n",
318
+ " keep_nodes = perm[num_mask_nodes: ]\n",
319
+ "\n",
320
+ " if self.replace_rate > 0:\n",
321
+ " num_noise_nodes = int(self.replace_rate * num_mask_nodes)\n",
322
+ " perm_mask = torch.randperm(num_mask_nodes, device=x.device)\n",
323
+ " token_nodes = mask_nodes[perm_mask[: int((1-self.replace_rate) * num_mask_nodes)]]\n",
324
+ " noise_nodes = mask_nodes[perm_mask[-int(self.replace_rate * num_mask_nodes):]]\n",
325
+ " noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]\n",
326
+ " out_x = x.clone()\n",
327
+ " out_x[token_nodes] = 0.0\n",
328
+ " out_x[noise_nodes] = x[noise_to_be_chosen]\n",
329
+ " else:\n",
330
+ " out_x = x.clone()\n",
331
+ " token_nodes = mask_nodes\n",
332
+ " out_x[mask_nodes] = 0.0\n",
333
+ "\n",
334
+ " out_x[token_nodes] += self.enc_mask_token\n",
335
+ " use_g = g.clone()\n",
336
+ "\n",
337
+ " return use_g, out_x, (mask_nodes, keep_nodes) \n",
338
+ " def mask_attr_prediction(self, g, x):\n",
339
+ " use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)\n",
340
+ " enc_rep = self.encoder(use_g, use_x)\n",
341
+ " # ---- attribute reconstruction ----\n",
342
+ " rep = self.encoder_to_decoder(enc_rep)\n",
343
+ " recon = self.decoder(use_g, rep)\n",
344
+ " x_init = x[mask_nodes]\n",
345
+ " x_rec = recon[mask_nodes]\n",
346
+ " loss = self.criterion(x_rec, x_init)\n",
347
+ " return loss\n",
348
+ "\n",
349
+ " def embed(self, g, x):\n",
350
+ " rep = self.encoder(g, x)\n",
351
+ " return rep\n",
352
+ " "
353
+ ]
354
+ },
355
+ {
356
+ "cell_type": "code",
357
+ "execution_count": 7,
358
+ "id": "c99cb509ac0f1054",
359
+ "metadata": {
360
+ "ExecuteTime": {
361
+ "end_time": "2024-12-13T04:05:20.473215Z",
362
+ "start_time": "2024-12-13T04:05:20.458354Z"
363
+ }
364
+ },
365
+ "outputs": [],
366
+ "source": [
367
+ "import dgl.nn as dglnn\n",
368
+ "import torch.nn as nn\n",
369
+ "import torch.nn.functional as F\n",
370
+ "class SimpleGNN(nn.Module):\n",
371
+ " def __init__(self, in_feats, hid_feats, out_feats):\n",
372
+ " super().__init__()\n",
373
+ " self.conv1 = dglnn.SAGEConv(\n",
374
+ " in_feats=in_feats, out_feats=hid_feats,aggregator_type=\"mean\")\n",
375
+ " self.conv2 = dglnn.SAGEConv(\n",
376
+ " in_feats=hid_feats, out_feats=out_feats,aggregator_type=\"mean\")\n",
377
+ "\n",
378
+ " def forward(self, graph, inputs):\n",
379
+ " # 输入是节点的特征\n",
380
+ " h = self.conv1(graph, inputs)\n",
381
+ " h = F.relu(h)\n",
382
+ " h = self.conv2(graph, h)\n",
383
+ " return h"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "code",
388
+ "execution_count": 8,
389
+ "id": "5a8a4e4dd753b642",
390
+ "metadata": {
391
+ "ExecuteTime": {
392
+ "end_time": "2024-12-13T04:05:20.707956Z",
393
+ "start_time": "2024-12-13T04:05:20.474302Z"
394
+ }
395
+ },
396
+ "outputs": [],
397
+ "source": [
398
+ "sage_enc = SimpleGNN(in_feats=7,hid_feats=4,out_feats=4)\n",
399
+ "sage_dec = SimpleGNN(in_feats=4,hid_feats=4,out_feats=7)\n",
400
+ "gmae = GMae(sage_enc,sage_dec,7,4,7,replace_rate=0)\n",
401
+ "epoches = 5\n",
402
+ "optimizer = optim.Adam(gmae.parameters(), lr=1e-3)\n",
403
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
404
+ ]
405
+ },
406
+ {
407
+ "cell_type": "code",
408
+ "execution_count": 11,
409
+ "id": "224529a988b81ef5",
410
+ "metadata": {
411
+ "ExecuteTime": {
412
+ "end_time": "2024-12-13T03:59:44.770215Z",
413
+ "start_time": "2024-12-13T03:59:11.545931Z"
414
+ }
415
+ },
416
+ "outputs": [
417
+ {
418
+ "name": "stdout",
419
+ "output_type": "stream",
420
+ "text": [
421
+ "epoch 0 started!\n"
422
+ ]
423
+ },
424
+ {
425
+ "name": "stderr",
426
+ "output_type": "stream",
427
+ "text": [
428
+ " 10%|▉ | 3262/32708 [00:32<04:55, 99.64it/s] \n",
429
+ "\n",
430
+ "KeyboardInterrupt\n",
431
+ "\n"
432
+ ]
433
+ }
434
+ ],
435
+ "source": [
436
+ "# print(f\"epoch {0} started!\")\n",
437
+ "# gmae.train()\n",
438
+ "# gmae.encoder.train()\n",
439
+ "# gmae.decoder.train()\n",
440
+ "# gmae.to(device)\n",
441
+ "# loss_epoch = 0\n",
442
+ "# import os\n",
443
+ "# os.environ[\"CUDA_LAUNCH_BLOCKING\"]=\"1\"\n",
444
+ "# for batch in tqdm(myGLoader):\n",
445
+ "# optimizer.zero_grad()\n",
446
+ "# batch_g, _ = batch\n",
447
+ "# R = batch_g.ndata[\"R\"].to(device)\n",
448
+ "# Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
449
+ "# Z_emb = gmae.encode_atom_index(Z_index)\n",
450
+ "# feat = torch.cat([R,Z_emb],dim=1)\n",
451
+ "# batch_g = batch_g.to(device)\n",
452
+ "# loss = gmae.mask_attr_prediction(batch_g, feat)\n",
453
+ "# loss.backward()\n",
454
+ "# optimizer.step()\n",
455
+ "# loss_epoch+=loss.item()\n"
456
+ ]
457
+ },
458
+ {
459
+ "cell_type": "code",
460
+ "execution_count": 9,
461
+ "id": "a22599c4e591125b",
462
+ "metadata": {
463
+ "ExecuteTime": {
464
+ "end_time": "2024-12-13T04:30:37.389930Z",
465
+ "start_time": "2024-12-13T04:05:20.708461Z"
466
+ }
467
+ },
468
+ "outputs": [
469
+ {
470
+ "name": "stdout",
471
+ "output_type": "stream",
472
+ "text": [
473
+ "epoch 0 started!\n"
474
+ ]
475
+ },
476
+ {
477
+ "name": "stderr",
478
+ "output_type": "stream",
479
+ "text": [
480
+ "100%|██████████| 32708/32708 [05:11<00:00, 105.09it/s]\n"
481
+ ]
482
+ },
483
+ {
484
+ "name": "stdout",
485
+ "output_type": "stream",
486
+ "text": [
487
+ "best model saved-loss:470.463-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-0-470.463.pt\n",
488
+ "epoch 0: loss 470.46260083183415\n",
489
+ "epoch 1 started!\n"
490
+ ]
491
+ },
492
+ {
493
+ "name": "stderr",
494
+ "output_type": "stream",
495
+ "text": [
496
+ "100%|██████████| 32708/32708 [05:04<00:00, 107.34it/s]\n"
497
+ ]
498
+ },
499
+ {
500
+ "name": "stdout",
501
+ "output_type": "stream",
502
+ "text": [
503
+ "best model saved-loss:18.848-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-1-18.848.pt\n",
504
+ "epoch 1: loss 18.848073385778548\n",
505
+ "epoch 2 started!\n"
506
+ ]
507
+ },
508
+ {
509
+ "name": "stderr",
510
+ "output_type": "stream",
511
+ "text": [
512
+ "100%|██████████| 32708/32708 [04:59<00:00, 109.35it/s]\n"
513
+ ]
514
+ },
515
+ {
516
+ "name": "stdout",
517
+ "output_type": "stream",
518
+ "text": [
519
+ "best model saved-loss:4.784-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-2-4.784.pt\n",
520
+ "epoch 2: loss 4.7842518344823475\n",
521
+ "epoch 3 started!\n"
522
+ ]
523
+ },
524
+ {
525
+ "name": "stderr",
526
+ "output_type": "stream",
527
+ "text": [
528
+ "100%|██████████| 32708/32708 [05:04<00:00, 107.37it/s]\n"
529
+ ]
530
+ },
531
+ {
532
+ "name": "stdout",
533
+ "output_type": "stream",
534
+ "text": [
535
+ "best model saved-loss:1.336-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-3-1.336.pt\n",
536
+ "epoch 3: loss 1.336019518836153\n",
537
+ "epoch 4 started!\n"
538
+ ]
539
+ },
540
+ {
541
+ "name": "stderr",
542
+ "output_type": "stream",
543
+ "text": [
544
+ "100%|██████████| 32708/32708 [04:56<00:00, 110.21it/s]"
545
+ ]
546
+ },
547
+ {
548
+ "name": "stdout",
549
+ "output_type": "stream",
550
+ "text": [
551
+ "best model saved-loss:0.572-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-4-0.572.pt\n",
552
+ "epoch 4: loss 0.5721691430861142\n"
553
+ ]
554
+ },
555
+ {
556
+ "name": "stderr",
557
+ "output_type": "stream",
558
+ "text": [
559
+ "\n"
560
+ ]
561
+ }
562
+ ],
563
+ "source": [
564
+ "from datetime import datetime\n",
565
+ "\n",
566
+ "current_time = datetime.now().strftime(\"%m-%d@%H_%M\")\n",
567
+ "best_loss = 10000\n",
568
+ "for epoch in range(epoches):\n",
569
+ " print(f\"epoch {epoch} started!\")\n",
570
+ " gmae.train()\n",
571
+ " gmae.encoder.train()\n",
572
+ " gmae.decoder.train()\n",
573
+ " gmae.to(device)\n",
574
+ " loss_epoch = 0\n",
575
+ " for batch in tqdm(myGLoader):\n",
576
+ " optimizer.zero_grad()\n",
577
+ " batch_g, _ = batch\n",
578
+ " R = batch_g.ndata[\"R\"].to(device)\n",
579
+ " # Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
580
+ " Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
581
+ " Z_emb = gmae.encode_atom_index(Z_index)\n",
582
+ " feat = torch.cat([R,Z_emb],dim=1)\n",
583
+ " batch_g = batch_g.to(device)\n",
584
+ " loss = gmae.mask_attr_prediction(batch_g, feat)\n",
585
+ " loss.backward()\n",
586
+ " optimizer.step()\n",
587
+ " loss_epoch+=loss.item()\n",
588
+ " if loss_epoch < best_loss:\n",
589
+ " formatted_loss_epoch = f\"{loss_epoch:.3f}\"\n",
590
+ " save_path = f\"./experiments/QM9/gmae/{current_time}/gmae_epoch-{epoch}-{formatted_loss_epoch}.pt\"\n",
591
+ " save_dir = os.path.dirname(save_path)\n",
592
+ " if not os.path.exists(save_dir):\n",
593
+ " os.makedirs(save_dir,exist_ok=True)\n",
594
+ " torch.save(gmae.state_dict(), save_path)\n",
595
+ " best_loss = loss_epoch\n",
596
+ " print(f\"best model saved-loss:{formatted_loss_epoch}-save_path:{save_path}\")\n",
597
+ " print(f\"epoch {epoch}: loss {loss_epoch}\")"
598
+ ]
599
+ }
600
+ ],
601
+ "metadata": {
602
+ "kernelspec": {
603
+ "display_name": "gnn_course",
604
+ "language": "python",
605
+ "name": "gnn_course"
606
+ },
607
+ "language_info": {
608
+ "codemirror_mode": {
609
+ "name": "ipython",
610
+ "version": 3
611
+ },
612
+ "file_extension": ".py",
613
+ "mimetype": "text/x-python",
614
+ "name": "python",
615
+ "nbconvert_exporter": "python",
616
+ "pygments_lexer": "ipython3",
617
+ "version": "3.8.20"
618
+ }
619
+ },
620
+ "nbformat": 4,
621
+ "nbformat_minor": 5
622
+ }
README.md CHANGED
@@ -1,88 +1,43 @@
1
- ---
2
- license: mit
3
- ---
4
- # 模型训练过程汇总(持续更新中)
5
-
6
- 对于已收集的每一个模型,`code` 目录为模型定义、训练和测试的代码和脚本文件,`model` 目录为已收集的 epoch 模型文件,`dataset.zip` 为模型数据集。
7
-
8
- 下表汇总了所有收集的模型训练过程信息:
9
-
10
- <table>
11
- <tr>
12
- <th>模型名称</th>
13
- <th>模型简介</th>
14
- <th>模型类型</th>
15
- <th>Epoch数量</th>
16
- <th>数据集信息</th>
17
- </tr>
18
- <tr>
19
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Code-Code/Clone-detection-BigCloneBench">Clone-detection-BigCloneBench</a></td>
20
- <td>基于大规模代码克隆基准数据集的代码克隆检测模型,任务是进行二元分类(0/1),其中1代表语义等价,0代表其他情况。</td>
21
- <td>代码克隆检测</td>
22
- <td>2个epoch</td>
23
- <td>BigCloneBench数据集</td>
24
- </tr>
25
- <tr>
26
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Code-Code/Clone-detection-POJ-104">Clone-detection-POJ-104</a></td>
27
- <td>基于POJ-104数据集的代码克隆检测模型,任务是识别不同编程题目中相似的代码实现,给定一段代码和一组候选代码,任务是返回具有相同语义的Top K个代码</td>
28
- <td>代码克隆检测</td>
29
- <td>2个epoch (0-1)</td>
30
- <td>POJ-104编程题目数据集</td>
31
- </tr>
32
- <tr>
33
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Code-Code/CodeCompletion-token">CodeCompletion-token</a></td>
34
- <td>基于token级别的代码自动补全模型</td>
35
- <td>代码补全</td>
36
- <td>5个epoch (Java语料库)</td>
37
- <td>Java代码token序列数据集</td>
38
- </tr>
39
- <tr>
40
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Code-Code/Defect-detection">Defect-detection</a></td>
41
- <td>代码缺陷检测模型,通过分析代码来识别潜在的缺陷和错误(进行二元分类(0/1))</td>
42
- <td>代码缺陷检测</td>
43
- <td>5个epoch</td>
44
- <td>包含缺陷标注的C语言代码数据集</td>
45
- </tr>
46
- <tr>
47
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Code-Code/code-refinement">code-refinement</a></td>
48
- <td>代码优化模型</td>
49
- <td>代码优化/重构</td>
50
- <td>34个epoch(small数据集)</td>
51
- <td>代码优化前后对数据集(C语言)</td>
52
- </tr>
53
- <tr>
54
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Code-Text/code-to-text">code-to-text</a></td>
55
- <td>代码到自然语言的转换模型</td>
56
- <td>代码注释生成</td>
57
- <td>每种语言10个epoch (支持Python/Java/JavaScript/PHP/Ruby/Go)</td>
58
- <td>多语言代码-文本对数据集</td>
59
- </tr>
60
- <tr>
61
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Text-code/NL-code-search-Adv">NL-code-search-Adv</a></td>
62
- <td>高级自然语言代码搜索模型,通过计算自然语言查询与代码片段之间的相似性来实现代码搜索,</td>
63
- <td>代码搜索</td>
64
- <td>2个epoch</td>
65
- <td>自然语言-(python)代码对数据集</td>
66
- </tr>
67
- <tr>
68
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Text-code/NL-code-search-WebQuery">NL-code-search-WebQuery</a></td>
69
- <td>基于Web查询的代码搜索模型,该模型通过编码器处理代码和自然语言输入,并利用多层感知器(MLP)来计算相似性得分</td>
70
- <td>代码搜索</td>
71
- <td>两个数据集各3个epoch</td>
72
- <td>Web查询-代码对数据集(CodeSearchNet数据集和CoSQA数据集(python))</td>
73
- </tr>
74
- <tr>
75
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Text-code/text-to-code">text-to-code</a></td>
76
- <td>自然语言到代码的生成模型</td>
77
- <td>代码生成</td>
78
- <td>23个epoch</td>
79
- <td>文本描述-代码(c语言)对数据集</td>
80
- </tr>
81
- <tr>
82
- <td><a href="https://huggingface.co/datasets/code-philia/ttvnet/tree/main/Graph/GraphMAE_MQ9">GraphMAE_MQ9</a></td>
83
- <td>在QM9数据集上训练的图掩码自编码器,通过对分子图中的原子的坐标以及类型进行预测实现自监督训练</td>
84
- <td>图自编码器</td>
85
- <td>10个epoch</td>
86
- <td>分子属性预测数据集</td>
87
- </tr>
88
- </table>
 
1
+ # Graph Mask AutoEncoder(GraphMAE) on QM9 Dataset
2
+ ## Overview
3
+ We pretrain a **Graph Mask AutoEncoder** on the **QM9 dataset**. Each node's input feature (dim=7) is the concatenation of the atom's 3-D position and a learned embedding (dim=4) of its element type; the model reconstructs these input features with a GraphSAGE encoder/decoder using a 4-dimensional hidden representation.
4
+
5
+ **Total Epochs: 10**
6
+ ## How to run
7
+ ### If you do not want to re-train the model again
8
+ - **Unzip the model.zip** to get the model weights & embedded graphs for each epoch
9
+ ### If you want to try out the training process
10
+ - step1. **Preprocess the dataset** (we have provided the preprocessed as well)
11
+
12
+ ```bash
13
+ python prepare_QM9_dataset.py --label_keys "mu" "gap"
14
+ ```
15
+ - step2. **Train the Graph Mask AutoEncoder on the preprocessed dataset**
16
+ ```bash
17
+ python run.py [--dataset_path] [--batch_size] [--epochs] [--device] [--save_dir]
18
+ ```
19
+
20
+ ## Model Description
21
+ ### Overview
22
+ Ref:**[GraphMAE](https://arxiv.org/abs/2205.10803)**
23
+ >Self-supervised learning (SSL) has been extensively explored in recent years. Particularly, generative SSL has seen emerging success in natural language processing and other AI fields, such as the wide adoption of BERT and GPT. Despite this, contrastive learning-which heavily relies on structural data augmentation and complicated training strategies-has been the dominant approach in graph SSL, while the progress of generative SSL on graphs, especially graph autoencoders (GAEs), has thus far not reached the potential as promised in other fields. In this paper, we identify and examine the issues that negatively impact the development of GAEs, including their reconstruction objective, training robustness, and error metric. We present a masked graph autoencoder GraphMAE that mitigates these issues for generative self-supervised graph pretraining. Instead of reconstructing graph structures, we propose to focus on feature reconstruction with both a masking strategy and scaled cosine error that benefit the robust training of GraphMAE. We conduct extensive experiments on 21 public datasets for three different graph learning tasks. The results manifest that GraphMAE-a simple graph autoencoder with careful designs-can consistently generate outperformance over both contrastive and generative state-of-the-art baselines. This study provides an understanding of graph autoencoders and demonstrates the potential of generative self-supervised pre-training on graphs.
24
+
25
+ ### Detail
26
+ Encoder & Decoder: Two layer [GraphSage](https://docs.dgl.ai/generated/dgl.nn.pytorch.conv.SAGEConv.html)
27
+
28
+ Readout Method: Mean
29
+
30
+ HiddenDims: 4 (Default)
31
+
32
+ MaskRate: 0.3 (Default)
33
+
34
+ Training on RTX 4060
35
+
36
+ ## Dataset Description
37
+ ### Overview
38
+ Ref: **[QM9](https://docs.dgl.ai/generated/dgl.data.QM9Dataset.html)**
39
+ > Type: Molecule property prediction
40
+ >
41
+ > Sample_num: 130831
42
+ >
43
+ > Total Elements: H,C,N,O,F
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
code/QM9_dataset_class.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from tqdm import tqdm
4
+ import networkx as nx
5
+ import torch
6
+ from torch.utils.data import Dataset
7
+
8
# Lookup tables between the atomic numbers of the QM9 elements (H, C, N, O, F)
# and the compact 0-based indices consumed by the embedding layer.
atom_number_index_dict = {
    1: 0,  # H
    6: 1,  # C
    7: 2,  # N
    8: 3,  # O
    9: 4   # F
}
# Inverse mapping: embedding index -> atomic number.
atom_index_number_dict = {index: number for number, index in atom_number_index_dict.items()}
# Largest atomic number covered by the tables (9, i.e. fluorine).
max_atom_number = max(atom_number_index_dict)


def atom_number2index(atom_number):
    """Translate an atomic number (e.g. 6 for carbon) into its embedding index."""
    return atom_number_index_dict[atom_number]


def atom_index2number(atom_index):
    """Translate an embedding index back into the corresponding atomic number."""
    return atom_index_number_dict[atom_index]
25
+
26
+
27
class PreprocessedQM9Dataset(Dataset):
    """QM9 wrapper that precomputes per-node element-type indices.

    Each item is a ``(graph, label)`` pair. During preprocessing, every
    node's atomic number ``Z`` is mapped to a compact embedding index
    (stored as ``ndata["Z_index"]``) and every graph is tagged with its
    position in the dataset (``ndata["sample_idx"]``).
    """

    def __init__(self, dataset):
        # `dataset` may be None when the caller intends to load a
        # previously saved processed dataset via `load_dataset`.
        self.dataset = dataset
        self.processed_data = []
        if dataset is not None:
            self._preprocess()

    def _preprocess(self):
        # BUG FIX: the original set i = 0 once and never incremented it,
        # so every graph was tagged with sample_idx == 0.
        for i, (g, label) in enumerate(tqdm(self.dataset)):
            g.ndata["Z_index"] = torch.tensor([atom_number2index(z.item()) for z in g.ndata["Z"]])
            g.ndata["sample_idx"] = i
            self.processed_data.append((g, label))

    def __len__(self):
        return len(self.processed_data)

    def __getitem__(self, idx):
        return self.processed_data[idx]

    def save_dataset(self, save_dir):
        """Serialize the processed (graph, label) pairs under save_dir."""
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        torch.save(self.processed_data, os.path.join(save_dir, "QM9_dataset_processed.pt"))

    def load_dataset(self, dataset_path):
        """Load processed (graph, label) pairs saved by save_dataset."""
        self.processed_data = torch.load(dataset_path)
code/lib/__pycache__/metrics.cpython-38.pyc ADDED
Binary file (2.42 kB). View file
 
code/lib/metrics.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding:utf-8 -*-
2
+
3
+ import numpy as np
4
+ import torch
5
+ import torch.nn.functional as F
6
+
7
+
8
def masked_mape_np(y_true, y_pred, null_val=np.nan):
    """Mean absolute percentage error, ignoring entries equal to null_val.

    When null_val is NaN, NaN entries of y_true are masked out instead.
    Valid entries are re-weighted so the mean is taken only over them.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            valid = ~np.isnan(y_true)
        else:
            valid = np.not_equal(y_true, null_val)
        valid = valid.astype('float32')
        valid /= np.mean(valid)
        ratio = np.abs(np.divide(np.subtract(y_pred, y_true).astype('float32'),
                                 y_true))
        ratio = np.nan_to_num(valid * ratio)
        return np.mean(ratio)
20
+
21
+
22
def masked_mse(preds, labels, null_val=np.nan):
    """Mean squared error over entries whose label differs from null_val.

    When null_val is NaN, NaN labels are excluded instead. Valid entries
    are re-weighted by the inverse valid fraction so the result is the
    mean over valid entries only.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = (labels != null_val)
    valid = valid.float()
    valid = valid / torch.mean(valid)
    # A fully-masked input would yield NaN weights; zero them out.
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    sq_err = (preds - labels) ** 2
    sq_err = sq_err * valid
    sq_err = torch.where(torch.isnan(sq_err), torch.zeros_like(sq_err), sq_err)
    return torch.mean(sq_err)
36
+
37
+
38
def masked_rmse(preds, labels, null_val=np.nan):
    """Root of the masked MSE; see masked_mse for the masking semantics."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return torch.sqrt(mse)
41
+
42
+
43
def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error over entries whose label differs from null_val.

    When null_val is NaN, NaN labels are excluded instead; the remaining
    entries are re-weighted so the mean covers only valid positions.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = (labels != null_val)
    valid = valid.float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    abs_err = torch.abs(preds - labels)
    abs_err = abs_err * valid
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)
    return torch.mean(abs_err)
55
+
56
+
57
def masked_mae_test(y_true, y_pred, null_val=np.nan):
    """NumPy mean absolute error, ignoring entries equal to null_val (or NaN)."""
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            valid = ~np.isnan(y_true)
        else:
            valid = np.not_equal(y_true, null_val)
        valid = valid.astype('float32')
        valid /= np.mean(valid)
        abs_err = np.abs(np.subtract(y_pred, y_true).astype('float32'))
        abs_err = np.nan_to_num(valid * abs_err)
        return np.mean(abs_err)
69
+
70
+
71
def masked_rmse_test(y_true, y_pred, null_val=np.nan):
    """NumPy root mean squared error, ignoring entries equal to null_val (or NaN)."""
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            valid = ~np.isnan(y_true)
        else:
            valid = np.not_equal(y_true, null_val)
        valid = valid.astype('float32')
        valid /= np.mean(valid)
        sq_err = ((y_pred - y_true) ** 2)
        sq_err = np.nan_to_num(valid * sq_err)
        return np.sqrt(np.mean(sq_err))
83
+
84
+
85
def sce_loss(x, y, alpha=3):
    """Scaled cosine error: mean of (1 - cos_sim(x, y)) ** alpha per row.

    Both inputs are L2-normalized along the last dimension, so their
    element-wise product summed over that dimension is the cosine
    similarity of corresponding rows.
    """
    x_unit = F.normalize(x, p=2, dim=-1)
    y_unit = F.normalize(y, p=2, dim=-1)
    cos_sim = (x_unit * y_unit).sum(dim=-1)
    return (1 - cos_sim).pow(alpha).mean()
code/lib/utils.py ADDED
@@ -0,0 +1,397 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ import torch
4
+ import torch.utils.data
5
+ from sklearn.metrics import mean_absolute_error
6
+ from sklearn.metrics import mean_squared_error
7
+ import sys
8
+ project_path = "/content/gdrive//My Drive/CS5248_project"
9
+ sys.path.append(project_path + '/lib')
10
+ from metrics import masked_mape_np
11
+ from scipy.sparse.linalg import eigs
12
+ from metrics import masked_mape_np, masked_mae,masked_mse,masked_rmse,masked_mae_test,masked_rmse_test
13
+
14
+
15
def re_normalization(x, mean, std):
    """Undo mean/std normalization: return x * std + mean."""
    return x * std + mean
18
+
19
+
20
def max_min_normalization(x, _max, _min):
    """Scale x from [_min, _max] into [-1, 1]."""
    scaled = 1. * (x - _min) / (_max - _min)
    return scaled * 2. - 1.
24
+
25
+
26
def re_max_min_normalization(x, _max, _min):
    """Map x from [-1, 1] back into the original range [_min, _max]."""
    unit = (x + 1.) / 2.
    return 1. * unit * (_max - _min) + _min
30
+
31
+
32
def get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None):
    '''
    Build the binary adjacency matrix and the edge-distance matrix.

    Parameters
    ----------
    distance_df_filename: str
        Path to the edge file. If the path contains 'npy', it is loaded
        directly as a saved adjacency matrix; otherwise it is parsed as a
        CSV of "from, to, distance" rows (the header line is skipped).
    num_of_vertices: int, the number of vertices
    id_filename: str, optional
        File with one raw node id per line; when given, raw ids are
        remapped to 0..N-1 indices in file order.

    Returns
    ----------
    A: np.ndarray (N, N) 0/1 adjacency matrix, and the distance matrix
    (None in the 'npy' case).
    '''
    if 'npy' in distance_df_filename:
        # A pre-built adjacency matrix was saved as .npy -- just load it.
        return np.load(distance_df_filename), None

    import csv

    n = int(num_of_vertices)
    A = np.zeros((n, n), dtype=np.float32)
    distance_A = np.zeros((n, n), dtype=np.float32)

    if id_filename:
        # Map raw node ids to contiguous indices starting at 0.
        with open(id_filename, 'r') as f:
            id_dict = {int(raw_id): idx
                       for idx, raw_id in enumerate(f.read().strip().split('\n'))}

        with open(distance_df_filename, 'r') as f:
            f.readline()  # skip the CSV header
            for row in csv.reader(f):
                if len(row) != 3:
                    continue
                src, dst, dist = int(row[0]), int(row[1]), float(row[2])
                A[id_dict[src], id_dict[dst]] = 1
                distance_A[id_dict[src], id_dict[dst]] = dist
        return A, distance_A

    with open(distance_df_filename, 'r') as f:
        f.readline()  # skip the CSV header
        for row in csv.reader(f):
            if len(row) != 3:
                continue
            src, dst, dist = int(row[0]), int(row[1]), float(row[2])
            A[src, dst] = 1
            distance_A[src, dst] = dist
    return A, distance_A
89
+
90
+
91
def scaled_Laplacian(W):
    '''
    Compute the rescaled Laplacian \tilde{L} = 2L / lambda_max - I.

    Parameters
    ----------
    W: np.ndarray, shape (N, N), the (weighted) adjacency matrix.

    Returns
    ----------
    np.ndarray of shape (N, N).
    '''
    assert W.shape[0] == W.shape[1]

    # Unnormalized graph Laplacian L = D - W, where D is the degree matrix.
    degree = np.diag(np.sum(W, axis=1))
    laplacian = degree - W

    # Largest-real-part eigenvalue of L via ARPACK (k=1).
    lambda_max = eigs(laplacian, k=1, which='LR')[0].real

    return (2 * laplacian) / lambda_max - np.identity(W.shape[0])
114
+
115
+
116
def cheb_polynomial(L_tilde, K):
    '''
    Chebyshev polynomials T_0 .. T_{K-1} of the scaled Laplacian, via the
    recurrence T_k = 2 * L_tilde * T_{k-1} - T_{k-2}.

    NOTE(review): `*` on np.ndarray is element-wise multiplication, not a
    matrix product -- kept as-is to preserve the original behavior.

    Parameters
    ----------
    L_tilde: scaled Laplacian, np.ndarray, shape (N, N)
    K: the maximum order of chebyshev polynomials

    Returns
    ----------
    cheb_polynomials: list(np.ndarray) of length max(K, 2), from T_0 up
    (the first two terms are always returned, even for K < 2).
    '''
    N = L_tilde.shape[0]

    polynomials = [np.identity(N), L_tilde.copy()]
    while len(polynomials) < K:
        polynomials.append(2 * L_tilde * polynomials[-1] - polynomials[-2])
    return polynomials
140
+
141
+
142
def load_graphdata_channel1(graph_signal_matrix_filename, num_of_indices, DEVICE, batch_size, shuffle=True):
    '''
    Build train/val/test DataLoaders from a preprocessed PEMS-style .npz file.

    Written for the PEMS traffic datasets: every sample contains the data of
    all monitoring stations at once, so the tensors built here feed a
    spatio-temporal sequence prediction model (designed for MSTGCN / ASTGCN).
    Note: the x arrays read from the file are already normalized, while the
    target arrays y hold the raw (un-normalized) values.

    :param graph_signal_matrix_filename: str, path of the raw data file; the
        file actually loaded is '<basename>_astcgn.npz' in the same directory
    :param num_of_indices: int  # NOTE(review): unused inside this function
    :param DEVICE: torch device all tensors are moved to
    :param batch_size: int
    :param shuffle: bool, applied to the training DataLoader only
    :return:
        train_loader, train_target_tensor, val_loader, val_target_tensor,
        test_loader, test_target_tensor, mean, std, where each x tensor is
        (B, N_nodes, in_feature, T_input) and each target tensor is
        (B, N_nodes, T_output)
    '''

    file = os.path.basename(graph_signal_matrix_filename).split('.')[0]

    dirpath = os.path.dirname(graph_signal_matrix_filename)

    filename = os.path.join(dirpath,
                            file) +'_astcgn'

    print('load file:', filename)

    file_data = np.load(filename + '.npz')
    train_x = file_data['train_x']  # e.g. (10181, 307, 3, 12)
    train_x = train_x[:, :, 0:5, :]  # keep at most the first 5 feature channels
    train_target = file_data['train_target']  # e.g. (10181, 307, 12)

    val_x = file_data['val_x']
    val_x = val_x[:, :, 0:5, :]
    val_target = file_data['val_target']

    test_x = file_data['test_x']
    test_x = test_x[:, :, 0:5, :]
    test_target = file_data['test_target']

    mean = file_data['mean'][:, :, 0:5, :]  # (1, 1, F, 1)
    std = file_data['std'][:, :, 0:5, :]  # (1, 1, F, 1)

    # ------- train_loader (only loader that may shuffle) -------
    train_x_tensor = torch.from_numpy(train_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    train_target_tensor = torch.from_numpy(train_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)

    train_dataset = torch.utils.data.TensorDataset(train_x_tensor, train_target_tensor)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle)

    # ------- val_loader (never shuffled) -------
    val_x_tensor = torch.from_numpy(val_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    val_target_tensor = torch.from_numpy(val_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)

    val_dataset = torch.utils.data.TensorDataset(val_x_tensor, val_target_tensor)

    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    # ------- test_loader (never shuffled) -------
    test_x_tensor = torch.from_numpy(test_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    test_target_tensor = torch.from_numpy(test_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)

    test_dataset = torch.utils.data.TensorDataset(test_x_tensor, test_target_tensor)

    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    # Log the tensor shapes as a quick sanity check.
    print('train:', train_x_tensor.size(), train_target_tensor.size())
    print('val:', val_x_tensor.size(), val_target_tensor.size())
    print('test:', test_x_tensor.size(), test_target_tensor.size())

    return train_loader, train_target_tensor, val_loader, val_target_tensor, test_loader, test_target_tensor, mean, std
219
+
220
+
221
def compute_val_loss_mstgcn(net, val_loader, criterion, masked_flag,missing_value,sw, epoch, limit=None):
    '''
    Compute the mean loss of `net` over the validation set and log it.

    :param net: model (nn.Module)
    :param val_loader: torch.utils.data.DataLoader over (encoder_inputs, labels)
    :param criterion: loss function; called as criterion(outputs, labels,
        missing_value) when masked_flag is truthy, else criterion(outputs, labels)
    :param masked_flag: selects the masked-loss call signature above
    :param missing_value: value treated as missing by the masked criterion
    :param sw: tensorboardX.SummaryWriter; receives 'validation_loss'
    :param epoch: int, current epoch (used as the logging step)
    :param limit: int or None; when given, stop after this many batches
    :return: val_loss, the mean loss over the evaluated batches
    '''

    net.train(False)  # ensure dropout layers are in evaluation mode

    with torch.no_grad():

        val_loader_length = len(val_loader)  # number of batches

        tmp = []  # collects the loss of every evaluated batch

        for batch_index, batch_data in enumerate(val_loader):
            encoder_inputs, labels = batch_data
            outputs = net(encoder_inputs)
            if masked_flag:
                loss = criterion(outputs, labels, missing_value)
            else:
                loss = criterion(outputs, labels)

            tmp.append(loss.item())
            if batch_index % 100 == 0:
                print('validation batch %s / %s, loss: %.2f' % (batch_index + 1, val_loader_length, loss.item()))
            if (limit is not None) and batch_index >= limit:
                break

        validation_loss = sum(tmp) / len(tmp)
        sw.add_scalar('validation_loss', validation_loss, epoch)
    return validation_loss
258
+
259
+
260
+ # def evaluate_on_test_mstgcn(net, test_loader, test_target_tensor, sw, epoch, _mean, _std):
261
+ # '''
262
+ # for rnn, compute MAE, RMSE, MAPE scores of the prediction for every time step on testing set.
263
+ #
264
+ # :param net: model
265
+ # :param test_loader: torch.utils.data.utils.DataLoader
266
+ # :param test_target_tensor: torch.tensor (B, N_nodes, T_output, out_feature)=(B, N_nodes, T_output, 1)
267
+ # :param sw:
268
+ # :param epoch: int, current epoch
269
+ # :param _mean: (1, 1, 3(features), 1)
270
+ # :param _std: (1, 1, 3(features), 1)
271
+ # '''
272
+ #
273
+ # net.train(False) # ensure dropout layers are in test mode
274
+ #
275
+ # with torch.no_grad():
276
+ #
277
+ # test_loader_length = len(test_loader)
278
+ #
279
+ # test_target_tensor = test_target_tensor.cpu().numpy()
280
+ #
281
+ # prediction = [] # 存储所有batch的output
282
+ #
283
+ # for batch_index, batch_data in enumerate(test_loader):
284
+ #
285
+ # encoder_inputs, labels = batch_data
286
+ #
287
+ # outputs = net(encoder_inputs)
288
+ #
289
+ # prediction.append(outputs.detach().cpu().numpy())
290
+ #
291
+ # if batch_index % 100 == 0:
292
+ # print('predicting testing set batch %s / %s' % (batch_index + 1, test_loader_length))
293
+ #
294
+ # prediction = np.concatenate(prediction, 0) # (batch, T', 1)
295
+ # prediction_length = prediction.shape[2]
296
+ #
297
+ # for i in range(prediction_length):
298
+ # assert test_target_tensor.shape[0] == prediction.shape[0]
299
+ # print('current epoch: %s, predict %s points' % (epoch, i))
300
+ # mae = mean_absolute_error(test_target_tensor[:, :, i], prediction[:, :, i])
301
+ # rmse = mean_squared_error(test_target_tensor[:, :, i], prediction[:, :, i]) ** 0.5
302
+ # mape = masked_mape_np(test_target_tensor[:, :, i], prediction[:, :, i], 0)
303
+ # print('MAE: %.2f' % (mae))
304
+ # print('RMSE: %.2f' % (rmse))
305
+ # print('MAPE: %.2f' % (mape))
306
+ # print()
307
+ # if sw:
308
+ # sw.add_scalar('MAE_%s_points' % (i), mae, epoch)
309
+ # sw.add_scalar('RMSE_%s_points' % (i), rmse, epoch)
310
+ # sw.add_scalar('MAPE_%s_points' % (i), mape, epoch)
311
+
312
+
313
def predict_and_save_results_mstgcn(net, data_loader, data_target_tensor, global_step, metric_method,_mean, _std, params_path, type):
    '''
    Run `net` over `data_loader`, save inputs/predictions/targets to an .npz
    file under params_path, and print MAE / RMSE / MAPE per prediction step
    plus overall metrics.

    :param net: nn.Module
    :param data_loader: torch.utils.data.DataLoader over (encoder_inputs, labels)
    :param data_target_tensor: ground-truth tensor (B, N_nodes, T_output)
    :param global_step: int, used in the output filename and log lines
    :param metric_method: 'mask' selects the masked metrics; any other value
        selects the plain sklearn metrics
    :param _mean: (1, 1, F, 1) normalization mean (to de-normalize inputs)
    :param _std: (1, 1, F, 1) normalization std
    :param params_path: directory where the result file is written
    :param type: str tag appended to the output filename
        # NOTE(review): shadows the builtin `type`; kept for compatibility
    '''
    net.train(False)  # ensure dropout layers are in evaluation mode

    with torch.no_grad():

        data_target_tensor = data_target_tensor.cpu().numpy()

        loader_length = len(data_loader)  # number of batches

        prediction = []  # collects the outputs of every batch

        input = []  # collects the (first-channel) inputs of every batch

        for batch_index, batch_data in enumerate(data_loader):

            encoder_inputs, labels = batch_data

            input.append(encoder_inputs[:, :, 0:1].cpu().numpy())  # (batch, T', 1)

            outputs = net(encoder_inputs)

            prediction.append(outputs.detach().cpu().numpy())

            if batch_index % 100 == 0:
                print('predicting data set batch %s / %s' % (batch_index + 1, loader_length))

        input = np.concatenate(input, 0)

        input = re_normalization(input, _mean, _std)

        prediction = np.concatenate(prediction, 0)  # (batch, T', 1)

        print('input:', input.shape)
        print('prediction:', prediction.shape)
        print('data_target_tensor:', data_target_tensor.shape)
        output_filename = os.path.join(params_path, 'output_epoch_%s_%s' % (global_step, type))
        np.savez(output_filename, input=input, prediction=prediction, data_target_tensor=data_target_tensor)

        # Compute error metrics for each prediction-horizon step separately.
        excel_list = []
        prediction_length = prediction.shape[2]

        for i in range(prediction_length):
            assert data_target_tensor.shape[0] == prediction.shape[0]
            print('current epoch: %s, predict %s points' % (global_step, i))
            if metric_method == 'mask':
                mae = masked_mae_test(data_target_tensor[:, :, i], prediction[:, :, i],0.0)
                rmse = masked_rmse_test(data_target_tensor[:, :, i], prediction[:, :, i],0.0)
                mape = masked_mape_np(data_target_tensor[:, :, i], prediction[:, :, i], 0)
            else :
                mae = mean_absolute_error(data_target_tensor[:, :, i], prediction[:, :, i])
                rmse = mean_squared_error(data_target_tensor[:, :, i], prediction[:, :, i]) ** 0.5
                mape = masked_mape_np(data_target_tensor[:, :, i], prediction[:, :, i], 0)
            print('MAE: %.2f' % (mae))
            print('RMSE: %.2f' % (rmse))
            print('MAPE: %.2f' % (mape))
            excel_list.extend([mae, rmse, mape])

        # Overall metrics across all steps (arrays flattened to a column).
        if metric_method == 'mask':
            mae = masked_mae_test(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0.0)
            rmse = masked_rmse_test(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0.0)
            mape = masked_mape_np(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0)
        else :
            mae = mean_absolute_error(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1))
            rmse = mean_squared_error(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1)) ** 0.5
            mape = masked_mape_np(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0)
        print('all MAE: %.2f' % (mae))
        print('all RMSE: %.2f' % (rmse))
        print('all MAPE: %.2f' % (mape))
        excel_list.extend([mae, rmse, mape])
        print(excel_list)
397
+
code/model.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ import sys
3
+
4
+ sys.path.append("lib")
5
+ from lib.metrics import sce_loss
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import dgl.nn as dglnn
10
+
11
+
12
class GMae(nn.Module):
    """Graph Masked Autoencoder (GraphMAE-style).

    Masks a fraction of node features, encodes the corrupted graph, maps
    the hidden representation through a linear layer and a decoder, and
    scores reconstruction of the masked nodes with a scaled cosine error
    (sce_loss, exponent alpha_l).
    """

    def __init__(self, encoder, decoder,
                 in_dim, hidden_dim, out_dim, mask_rate=0.3, replace_rate=0.1, alpha_l=2,
                 embedding_layer_classes=5, embedding_layer_dim=4):
        super(GMae, self).__init__()
        # Embedding table for atom-type indices (see encode_atom_index).
        self.Z_embedding = nn.Embedding(embedding_layer_classes, embedding_layer_dim)
        self.encoder = encoder
        self.decoder = decoder
        self.mask_rate = mask_rate
        self.replace_rate = replace_rate
        self.alpha_l = alpha_l
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.embedding_layer_classes = embedding_layer_classes
        self.embedding_layer_dim = embedding_layer_dim
        # Learnable token added to the features of masked ("token") nodes.
        self.enc_mask_token = nn.Parameter(torch.zeros(1, in_dim))
        self.criterion = partial(sce_loss, alpha=alpha_l)
        self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)

    def encode_atom_index(self, Z_index):
        """Look up embedding vectors for a tensor of atom-type indices."""
        return self.Z_embedding(Z_index)

    def encoding_mask_noise(self, g, x, mask_rate=0.3):
        """Corrupt node features: zero out a random subset and optionally
        replace a fraction of the masked nodes with other nodes' features.

        Returns (graph_clone, corrupted_x, (mask_nodes, keep_nodes)).
        """
        num_nodes = g.num_nodes()
        perm = torch.randperm(num_nodes, device=x.device)
        # random masking
        num_mask_nodes = int(mask_rate * num_nodes)
        mask_nodes = perm[: num_mask_nodes]
        keep_nodes = perm[num_mask_nodes:]

        out_x = x.clone()
        if self.replace_rate > 0 and num_mask_nodes > 0:
            num_noise_nodes = int(self.replace_rate * num_mask_nodes)
            perm_mask = torch.randperm(num_mask_nodes, device=x.device)
            # Partition mask_nodes exactly into token nodes and noise nodes.
            # BUG FIX: the original sliced noise nodes with
            # [-int(replace_rate * num_mask_nodes):]; when that count rounds
            # to 0, "[-0:]" selects ALL masked nodes, which are then assigned
            # from an empty feature slice. Double rounding could also leave
            # nodes in neither subset.
            num_token_nodes = num_mask_nodes - num_noise_nodes
            token_nodes = mask_nodes[perm_mask[: num_token_nodes]]
            noise_nodes = mask_nodes[perm_mask[num_token_nodes:]]
            out_x[token_nodes] = 0.0
            if num_noise_nodes > 0:
                noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]
                out_x[noise_nodes] = x[noise_to_be_chosen]
        else:
            token_nodes = mask_nodes
            out_x[mask_nodes] = 0.0

        out_x[token_nodes] += self.enc_mask_token
        use_g = g.clone()

        return use_g, out_x, (mask_nodes, keep_nodes)

    def mask_attr_prediction(self, g, x):
        """One masked-reconstruction step; returns the SCE loss on masked nodes."""
        use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)
        enc_rep = self.encoder(use_g, use_x)
        # ---- attribute reconstruction ----
        rep = self.encoder_to_decoder(enc_rep)
        recon = self.decoder(use_g, rep)
        x_init = x[mask_nodes]
        x_rec = recon[mask_nodes]
        loss = self.criterion(x_rec, x_init)
        return loss

    def embed(self, g, x):
        """Encode the (uncorrupted) graph into node representations."""
        rep = self.encoder(g, x)
        return rep
76
+
77
+
78
class SimpleGnn(nn.Module):
    """Two-layer GraphSAGE (mean aggregator) with a ReLU in between."""

    def __init__(self, in_feats, hid_feats, out_feats):
        super().__init__()
        self.conv1 = dglnn.SAGEConv(
            in_feats=in_feats, out_feats=hid_feats, aggregator_type="mean")
        self.conv2 = dglnn.SAGEConv(
            in_feats=hid_feats, out_feats=out_feats, aggregator_type="mean")

    def forward(self, graph, inputs):
        """Return node representations: SAGEConv -> ReLU -> SAGEConv."""
        hidden = F.relu(self.conv1(graph, inputs))
        return self.conv2(graph, hidden)
code/prepare_QM9_dataset.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import time
4
+
5
+ from dgl.data import QM9Dataset
6
+ from dgl.dataloading import GraphDataLoader
7
+ from rdkit import Chem
8
+ from rdkit import RDLogger;
9
+ from torch.utils.data import Dataset
10
+ import torch.nn.functional as F
11
+ from tqdm import tqdm
12
+ import ast
13
+
14
+ from QM9_dataset_class import PreprocessedQM9Dataset
15
+
16
+ RDLogger.DisableLog('rdApp.*')
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.optim as optim
20
+
21
+
22
+ QM9_label_keys = ['mu','alpha','homo','lumo','gap','r2','zpve','U0','U','H','G','Cv']
23
+
24
+
25
+
26
def prepare_main(label_keys=None, cutoff=5.0, save_path="dataset"):
    """Build, preprocess, and persist the QM9 dataset.

    Args:
        label_keys: list of QM9 target names (must all be in QM9_label_keys).
        cutoff: neighbor cutoff distance (Angstrom) for graph construction.
        save_path: directory/path where the processed dataset is stored.

    Returns:
        The PreprocessedQM9Dataset instance.

    Raises:
        ValueError: if ``label_keys`` is None or contains an unknown key.
    """
    assert save_path != "", "save_path shouldn't be empty"
    if label_keys is None:
        raise ValueError('label_keys cannot be None')
    for label_key in label_keys:
        if label_key not in QM9_label_keys:
            raise ValueError('label_key must be in QM9_label_keys,refer:https://docs.dgl.ai/en/0.8.x/generated/dgl.data.QM9Dataset.html')
    # Bug fix: honor the caller-supplied cutoff (was hard-coded to 5.0).
    dataset = QM9Dataset(label_keys=label_keys, cutoff=cutoff)
    dataset_processed = PreprocessedQM9Dataset(dataset)
    print("Store processed QM9 dataset:", save_path)
    # Bug fix: save to the requested path (was hard-coded to "dataset").
    dataset_processed.save_dataset(save_path)
    return dataset_processed
38
+
39
def main():
    """CLI entry point: parse arguments and run QM9 dataset preparation."""
    parser = argparse.ArgumentParser(description="Prepare QM9 dataset")
    parser.add_argument('--label_keys', nargs='+', help="label keys in QM9 dataset,like 'mu' 'gap'....")
    parser.add_argument('--cutoff', type=float, default=5.0, help="cutoff for atom number")
    parser.add_argument('--save_path', type=str, default="dataset", help="processed_dataset save path")
    args = parser.parse_args()
    # Bug fix: forward save_path to prepare_main; it was parsed but ignored.
    prepare_main(label_keys=args.label_keys, cutoff=args.cutoff, save_path=args.save_path)


if __name__ == '__main__':
    main()
code/run.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+
4
+ import dgl
5
+ import torch.utils.data
6
+ from dgl.dataloading import GraphDataLoader
7
+ from torch import optim
8
+ from tqdm import tqdm
9
+ from QM9_dataset_class import PreprocessedQM9Dataset
10
+ from model import SimpleGnn, GMae
11
+ import torch.nn as nn
12
+
13
def train_epoch(epoch, graphLoader: torch.utils.data.DataLoader,
                model: nn.Module, device, optimizer: torch.optim.Optimizer,
                save_dir: str
                ):
    """Train the masked autoencoder for one pass over ``graphLoader``.

    Node features are built per batch as [R (3-d coords) || atom embedding]
    before the graph itself is moved to ``device``.

    Returns:
        The sum of per-batch loss values for the epoch.
    """
    print(f"epoch {epoch} started!")
    model.train()
    model.encoder.train()
    model.decoder.train()
    model.to(device)
    total_loss = 0
    for batch in tqdm(graphLoader):
        optimizer.zero_grad()
        batch_graph, _ = batch
        coords = batch_graph.ndata["R"].to(device)
        atom_indices = batch_graph.ndata["Z_index"].to(device)
        atom_embedding = model.encode_atom_index(atom_indices)
        node_features = torch.cat([coords, atom_embedding], dim=1)
        batch_graph = batch_graph.to(device)
        batch_loss = model.mask_attr_prediction(batch_graph, node_features)
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
    return total_loss
37
+
38
+
39
def train_loop(dataset_path, epochs, batch_size, device, save_dir):
    """Train the GMae model on the preprocessed QM9 dataset and, after each
    epoch, save a model checkpoint plus per-graph node embeddings.

    Args:
        dataset_path: path of the preprocessed dataset file.
        epochs: number of training epochs.
        batch_size: graphs per batch.
        device: torch device string (e.g. 'cuda:0').
        save_dir: root directory for checkpoints and embedding files.
    """
    device = torch.device(device)
    dataset = PreprocessedQM9Dataset(None)
    dataset.load_dataset(dataset_path)
    print("Dataset loaded:", dataset_path, "Total samples:", len(dataset))
    print("Initializing dataloader")
    # shuffle=False keeps embedding file indices aligned with dataset order.
    myGLoader = GraphDataLoader(dataset, batch_size=batch_size, pin_memory=True, shuffle=False)
    sage_enc = SimpleGnn(in_feats=7, hid_feats=4, out_feats=4)  # 7 = R_dim(3)+Z_embedding_dim(4)
    sage_dec = SimpleGnn(in_feats=4, hid_feats=4, out_feats=7)
    gmae = GMae(sage_enc, sage_dec, 7, 4, 7, replace_rate=0)
    optimizer = optim.Adam(gmae.parameters(), lr=1e-3)
    print("Start training", "epochs:", epochs, "batch_size:", batch_size)
    for epoch in range(epochs):
        loss_epoch = train_epoch(epoch, myGLoader, gmae, device, optimizer, save_dir)
        formatted_loss_epoch = f"{loss_epoch:.3f}"
        save_path = os.path.join(save_dir, f"epoch_{epoch}", f"gmae_{formatted_loss_epoch}.pt")
        save_subdir = os.path.dirname(save_path)
        os.makedirs(save_subdir, exist_ok=True)
        torch.save(gmae.state_dict(), save_path)
        print(f"Epoch:{epoch},loss:{loss_epoch},Model saved:{save_path}")
        with torch.no_grad():
            print(f"Epoch:{epoch},start embedding")
            gmae.eval()
            gmae.encoder.eval()
            # Bug fix: write each embedded graph to disk as soon as it is
            # produced instead of accumulating the entire dataset in a Python
            # list first (the original kept ~130k graphs in memory at once).
            graph_idx = 0
            embeddings_save_path = None
            for batch in tqdm(myGLoader):
                batch_g, _ = batch
                R = batch_g.ndata["R"].to(device)
                Z_index = batch_g.ndata["Z_index"].to(device)
                Z_emb = gmae.encode_atom_index(Z_index)
                feat = torch.cat([R, Z_emb], dim=1)
                batch_g = batch_g.to(device)
                batch_g.ndata["embedding"] = gmae.embed(batch_g, feat)
                for embedded_graph in dgl.unbatch(batch_g):
                    embeddings_save_path = os.path.join(save_dir, f"epoch_{epoch}", f"embedding_{graph_idx}.dgl")
                    dgl.save_graphs(embeddings_save_path, [embedded_graph])
                    graph_idx += 1
            # Guard against an empty loader (original raised NameError here).
            print(f"epoch:{epoch},embedding saved:{embeddings_save_path},total_graphs:{graph_idx}")
80
+
81
+
82
def main():
    """CLI entry point: parse training options and launch the train loop."""
    arg_parser = argparse.ArgumentParser(description="Prepare QM9 dataset")
    arg_parser.add_argument('--dataset_path', type=str, default='dataset/QM9_dataset_processed.pt')
    arg_parser.add_argument('--batch_size', type=int, default=4)
    arg_parser.add_argument('--epochs', type=int, default=10, help='number of epochs')
    arg_parser.add_argument("--device", type=str, default='cuda:0')
    arg_parser.add_argument("--save_dir", type=str, default='./model')
    opts = arg_parser.parse_args()
    train_loop(opts.dataset_path, opts.epochs, opts.batch_size, opts.device, opts.save_dir)


if __name__ == '__main__':
    main()
model/epoch_0/embedding_0.dgl ADDED
Binary file (5.49 kB). View file
 
model/epoch_0/embedding_1.dgl ADDED
Binary file (5.39 kB). View file
 
model/epoch_0/embedding_10.dgl ADDED
Binary file (5.76 kB). View file
 
model/epoch_0/embedding_100.dgl ADDED
Binary file (6.48 kB). View file
 
model/epoch_0/embedding_1000.dgl ADDED
Binary file (6.46 kB). View file
 
model/epoch_0/embedding_10000.dgl ADDED
Binary file (7.61 kB). View file
 
model/epoch_0/embedding_100000.dgl ADDED
Binary file (8.11 kB). View file
 
model/epoch_0/embedding_100001.dgl ADDED
Binary file (7.86 kB). View file
 
model/epoch_0/embedding_100002.dgl ADDED
Binary file (7.89 kB). View file
 
model/epoch_0/embedding_100003.dgl ADDED
Binary file (7.64 kB). View file
 
model/epoch_0/embedding_100004.dgl ADDED
Binary file (7.63 kB). View file
 
model/epoch_0/embedding_100005.dgl ADDED
Binary file (7.31 kB). View file
 
model/epoch_0/embedding_100006.dgl ADDED
Binary file (8.21 kB). View file
 
model/epoch_0/embedding_100007.dgl ADDED
Binary file (8.51 kB). View file
 
model/epoch_0/embedding_100008.dgl ADDED
Binary file (8.24 kB). View file
 
model/epoch_0/embedding_100009.dgl ADDED
Binary file (8.44 kB). View file
 
model/epoch_0/embedding_10001.dgl ADDED
Binary file (7.33 kB). View file
 
model/epoch_0/embedding_100010.dgl ADDED
Binary file (8.66 kB). View file
 
model/epoch_0/embedding_100011.dgl ADDED
Binary file (8.33 kB). View file
 
model/epoch_0/embedding_100012.dgl ADDED
Binary file (8.44 kB). View file
 
model/epoch_0/embedding_100013.dgl ADDED
Binary file (8.24 kB). View file
 
model/epoch_0/embedding_100014.dgl ADDED
Binary file (8.69 kB). View file
 
model/epoch_0/embedding_100015.dgl ADDED
Binary file (8.4 kB). View file
 
model/epoch_0/embedding_100016.dgl ADDED
Binary file (8.44 kB). View file
 
model/epoch_0/embedding_100017.dgl ADDED
Binary file (8.24 kB). View file
 
model/epoch_0/embedding_100018.dgl ADDED
Binary file (8.79 kB). View file
 
model/epoch_0/embedding_100019.dgl ADDED
Binary file (8.35 kB). View file
 
model/epoch_0/embedding_10002.dgl ADDED
Binary file (8.02 kB). View file
 
model/epoch_0/embedding_100020.dgl ADDED
Binary file (8.19 kB). View file