Change link field name from 'label' to 'link' (#92)
.gitignore CHANGED
@@ -1,2 +1,18 @@
 .idea/
 .DS_Store
+
+venv/
+.venv/
+*.pyc
+__pycache__/
+instance/
+.cache/
+.pytest_cache/
+.coverage
+htmlcov/
+dist/
+build/
+*.egg-info/
+*.swp
+*~
+.eggs
huggingface/generate_ancient_greek_dataset.py CHANGED
@@ -13,11 +13,11 @@
 # limitations under the License.
 import json
 
+from database.database_clients import get_node_label_attribute_key
 from vocabulary_parser import ANCIENT_GREEK
 from vocabulary_parser import get_attributes
 from vocabulary_parser import get_definitions
 from vocabulary_parser import get_vocabulary
-from wilhelm_python_sdk.database_clients import get_node_label_attribute_key
 
 
 def generate_dataset(yaml_path: str, dataset_path: str):
@@ -47,5 +47,5 @@ def generate_dataset(yaml_path: str, dataset_path: str):
         target_node = {label_key: definition}
         label = {label_key: predicate if predicate else "definition"}
 
-        graph.write(json.dumps({"source": source_node, "target": target_node, "label": label}))
+        graph.write(json.dumps({"source": source_node, "target": target_node, "link": label}))
         graph.write("\n")
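Note on the payload: each line the generator emits is one self-contained JSON record, and this change only renames the edge field from "label" to "link". Below is a minimal sketch of the record format after the rename, with made-up node values; the real ones are derived from the YAML vocabulary and get_node_label_attribute_key.

import json

# Hypothetical values for illustration only; real nodes are built from the YAML vocabulary.
label_key = "label"
source_node = {label_key: "λόγος"}
target_node = {label_key: "word, speech, reason"}
link = {label_key: "definition"}

record = {"source": source_node, "target": target_node, "link": link}
print(json.dumps(record))
# one dataset line, e.g. {"source": {"label": "\u03bb..."}, "target": {...}, "link": {"label": "definition"}}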
huggingface/generate_german_dataset.py CHANGED
@@ -13,13 +13,13 @@
 # limitations under the License.
 import json
 
+from database.database_clients import get_node_label_attribute_key
 from german_parser import get_declension_attributes
 from vocabulary_parser import GERMAN
 from vocabulary_parser import get_attributes
 from vocabulary_parser import get_definitions
 from vocabulary_parser import get_inferred_links
 from vocabulary_parser import get_vocabulary
-from wilhelm_python_sdk.database_clients import get_node_label_attribute_key
 
 
 def generate_dataset(yaml_path: str, dataset_path: str):
@@ -56,5 +56,5 @@ def generate_dataset(yaml_path: str, dataset_path: str):
         target_node = all_nodes[link["target_label"]]
         label = link["attributes"]
 
-        graph.write(json.dumps({"source": source_node, "target": target_node, "label": label}))
+        graph.write(json.dumps({"source": source_node, "target": target_node, "link": label}))
         graph.write("\n")
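For context, the German generator reads pre-computed inferred links rather than building edges from definitions; the context lines above (link["target_label"], link["attributes"]) pin down the shape it expects. A hedged sketch of that lookup, with invented terms and an assumed source_label field that this hunk does not show:

# Invented example data; only "target_label" and "attributes" are confirmed by the diff above.
all_nodes = {
    "der Hund": {"label": "der Hund"},
    "die Hunde": {"label": "die Hunde"},
}
link = {
    "source_label": "der Hund",   # assumption: companion field, not visible in this hunk
    "target_label": "die Hunde",
    "attributes": {"label": "plural of"},
}

source_node = all_nodes[link["source_label"]]
target_node = all_nodes[link["target_label"]]
label = link["attributes"]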
huggingface/generate_latin_dataset.py CHANGED
@@ -13,11 +13,11 @@
 # limitations under the License.
 import json
 
+from database.database_clients import get_node_label_attribute_key
 from vocabulary_parser import LATIN
 from vocabulary_parser import get_attributes
 from vocabulary_parser import get_definitions
 from vocabulary_parser import get_vocabulary
-from wilhelm_python_sdk.database_clients import get_node_label_attribute_key
 
 
 def generate_dataset(yaml_path: str, dataset_path: str):
@@ -47,5 +47,5 @@ def generate_dataset(yaml_path: str, dataset_path: str):
         target_node = {label_key: definition}
         label = {label_key: predicate if predicate else "definition"}
 
-        graph.write(json.dumps({"source": source_node, "target": target_node, "label": label}))
+        graph.write(json.dumps({"source": source_node, "target": target_node, "link": label}))
         graph.write("\n")
huggingface/vocabulary_parser.py CHANGED
@@ -177,8 +177,8 @@ def get_inferred_links(
 
     This function is the point of extending link inference capabilities. At this point, the link inference includes
 
-    - :py:meth:`token sharing <
-    - :py:meth:`token sharing <
+    - :py:meth:`token sharing <wilhelm_data_loader.vocabulary_parser.get_inferred_tokenization_links>`
+    - :py:meth:`token sharing <wilhelm_data_loader.vocabulary_parser.get_levenshtein_links>`
 
     :param vocabulary: A wilhelm-vocabulary repo YAML file deserialized
     :param label_key: The name of the node attribute that will be used as the label in displaying the node
@@ -304,7 +304,7 @@ def get_inferred_tokenization_links(
 def get_structurally_similar_links(vocabulary: list[dict], label_key: str) -> list[dict]:
     """
     Return a list of inferred links between structurally-related vocabulary terms that are determined by the function
-    :py:meth:`token sharing <
+    :py:meth:`token sharing <wilhelm_data_loader.vocabulary_parser.is_structurally_similar>`.
 
     This was inspired by the spotting the relationships among::
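The corrected cross-references point at a tokenization-based and a Levenshtein-based linker in wilhelm_data_loader. As a rough illustration of the Levenshtein idea only, not the repo's actual implementation, using nltk, which setup.py below already declares as a dependency:

import nltk

# Sketch of Levenshtein-style link inference; terms and threshold are made up.
def infer_edit_distance_links(terms: list[str], max_distance: int = 2) -> list[dict]:
    links = []
    for i, a in enumerate(terms):
        for b in terms[i + 1:]:
            if nltk.edit_distance(a, b) <= max_distance:
                links.append({"source_label": a, "target_label": b,
                              "attributes": {"label": "structurally similar"}})
    return links

print(infer_edit_distance_links(["andauern", "andauernd", "anders"]))
# links "andauern" and "andauernd" (edit distance 1); "anders" stays unlinked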
setup.py CHANGED
@@ -2,7 +2,7 @@ from setuptools import find_packages
 from setuptools import setup
 
 setup(
-    name="
+    name="wilhelm_vocabulary",
     version="0.0.1",
     description="A vocabulary processor specifically designed for QubitPi",
     url="https://github.com/QubitPi/wilhelm-vocabulary",
@@ -11,7 +11,7 @@ setup(
     license="Apache-2.0",
     packages=find_packages(),
     python_requires='>=3.10',
-    install_requires=["pyyaml", "nltk", "
+    install_requires=["pyyaml", "nltk", "wilhelm_data_loader"],
     zip_safe=False,
     include_package_data=True,
     setup_requires=["setuptools-pep8", "isort"],
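A quick way to sanity-check the rename and the new dependency is a hypothetical smoke test like the one below; it assumes pip install . has been run in the repo root.

import importlib.metadata

# The distribution should now resolve under its new name ...
dist = importlib.metadata.distribution("wilhelm_vocabulary")
print(dist.metadata["Name"], dist.version)   # wilhelm_vocabulary 0.0.1
# ... and wilhelm_data_loader should appear among its declared requirements.
print(dist.requires)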