Add Python style check to CI/CD (#3)

Changed files:

- .github/workflows/ci-cd.yaml (+17 −1)
- .isort.cfg (+2 −0)
- README.md (+15 −3)
- extract.py (+6 −2)
- requirements.txt (+3 −0)
- setup.cfg (+3 −0)
- tests/wiktionary/wiktextract/test_parse.py (+5 −4)
- wiktionary/wiktextract/extract.py (+26 −10)
- wiktionary/wiktextract/german.py (+1 −1)
- wiktionary/wiktextract/parse.py (+3 −1)
.github/workflows/ci-cd.yaml
CHANGED

```diff
@@ -51,8 +51,24 @@ jobs:
         with:
           fail: true
 
+  code-style:
+    name: Python Code Style Check
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python 3.10
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+      - name: Install dependencies
+        run: pip3 install -r requirements.txt
+      - name: Check import orders
+        run: isort --check .
+      - name: pep8
+        run: pycodestyle .
+
   sync-to-huggingface-space:
-    needs: [yaml-lint, linkChecker]
+    needs: [yaml-lint, linkChecker, code-style]
    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
```
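The new `code-style` job can be reproduced locally before pushing. A minimal sketch, run from the repository root, using the same commands the workflow executes:

```console
pip3 install -r requirements.txt

# Same two checks the code-style job runs
isort --check .
pycodestyle .
```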
.isort.cfg
ADDED

```diff
@@ -0,0 +1,2 @@
+[settings]
+force_single_line = True
```
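`force_single_line = True` makes isort keep every import on its own line rather than grouping names into one `from ... import a, b` statement, matching the one-import-per-line style this repository already uses (see the paired `from wiktionary.wiktextract.extract import ...` lines in `extract.py`). An illustrative failing check; the file name is hypothetical and the exact message wording may vary across isort versions:

```console
$ isort --check-only some_module.py
ERROR: some_module.py Imports are incorrectly sorted and/or formatted.
```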
README.md
CHANGED

````diff
@@ -83,12 +83,18 @@ The Wiktionary language data is available on 🤗 [Hugging Face Datasets][Huggin
 
 ```python
 from datasets import load_dataset
-dataset = load_dataset("QubitPi/wiktionary-data"
+dataset = load_dataset("QubitPi/wiktionary-data")
 ```
 
 There are __two__ data subsets:
 
-1. __Languages__ subset that contains
+1. __Languages__ subset that contains extraction of a subset of supported languages:
+
+   ```console
+   dataset = load_dataset("QubitPi/wiktionary-data", "Wiktionary")
+   ```
+
+   The subset contains the following splits
 
    - `German`
    - `Latin`
@@ -101,7 +107,13 @@ There are __two__ data subsets:
 
 2. __Graph__ subset that is useful for constructing knowledge graphs:
 
-   - `AllLanguage`: all the languages listed above in a giant graph
+   ```console
+   dataset = load_dataset("QubitPi/wiktionary-data", "Knowledge Graph")
+   ```
+
+   The subset contains the following splits
+
+   - `AllLanguage`: all the languages listed above in a giant graph
 
 The _Graph_ data ontology is the following:
 
````
extract.py
CHANGED

```diff
@@ -14,12 +14,16 @@
 from wiktionary.wiktextract.extract import extract_data
 from wiktionary.wiktextract.extract import extract_graph
 
-
 if __name__ == "__main__":
     import argparse
 
     parser = argparse.ArgumentParser(description='Post-process Wiktextract data')
-    parser.add_argument('-i', '--input', help='Raw data file, i.e. the path to raw-wiktextract-data.jsonl', required=True)
+    parser.add_argument(
+        '-i',
+        '--input',
+        help='Raw data file, i.e. the path to raw-wiktextract-data.jsonl',
+        required=True
+    )
     args = vars(parser.parse_args())
 
     extract_data(args["input"])
```
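The `--input` flag stays required after the reformat, so the entry point would be invoked along these lines (a sketch; the .jsonl path is wherever the raw Wiktextract dump was saved):

```console
# Long-form flag
python3 extract.py --input raw-wiktextract-data.jsonl

# Equivalent short flag
python3 extract.py -i raw-wiktextract-data.jsonl
```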
requirements.txt
CHANGED

```diff
@@ -1 +1,4 @@
 wilhelm_python_sdk
+
+pycodestyle
+isort
```
setup.cfg
ADDED

```diff
@@ -0,0 +1,3 @@
+[pep8]
+max-line-length = 120
+exclude = ./.eggs,./build
```
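pycodestyle picks this configuration up automatically, since setup.cfg is one of its default project config files; with `max-line-length = 120`, lines up to 120 characters pass and longer ones are reported as E501. An illustrative failure; the path, line number, and character count are hypothetical:

```console
$ pycodestyle .
./wiktionary/wiktextract/parse.py:19:121: E501 line too long (158 > 120 characters)
```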
tests/wiktionary/wiktextract/test_parse.py
CHANGED

```diff
@@ -53,12 +53,12 @@ class TestParse(unittest.TestCase):
             }),
         )
 
-
     def test_get_audios(self):
         self.assertEqual(
             [{
                 "ogg_url": "https://upload.wikimedia.org/wikipedia/commons/a/a8/De-Volap%C3%BCk.ogg",
-                "mp3_url": "https://upload.wikimedia.org/wikipedia/commons/transcoded/a/a8/De-Volap%C3%BCk.ogg/De-Volap%C3%BCk.ogg.mp3"
+                "mp3_url": "https://upload.wikimedia.org/wikipedia/commons/transcoded/a/a8/De-Volap%C3%BCk.ogg/" +
+                           "De-Volap%C3%BCk.ogg.mp3"
             }],
             get_audios({
                 "sounds": [
@@ -71,8 +71,9 @@ class TestParse(unittest.TestCase):
                     {
                         "audio": "De-Volapük.ogg",
                         "ogg_url": "https://upload.wikimedia.org/wikipedia/commons/a/a8/De-Volap%C3%BCk.ogg",
-                        "mp3_url": "https://upload.wikimedia.org/wikipedia/commons/transcoded/a/a8/De-Volap%C3%BCk.ogg/De-Volap%C3%BCk.ogg.mp3"
+                        "mp3_url": "https://upload.wikimedia.org/wikipedia/commons/transcoded/a/a8/" +
+                                   "De-Volap%C3%BCk.ogg/De-Volap%C3%BCk.ogg.mp3"
                     }
                 ]
             })
-    )
+        )
```
wiktionary/wiktextract/extract.py
CHANGED

```diff
@@ -26,9 +26,11 @@ def extract_data(wiktextract_data_path: str):
     - pos: the Part of Speech of this word
     - definitions: an array of definitions, each element of the array is a string
 
-    :param wiktextract_data_path: the path of the wiktextract jsonl file. Can be downloaded from https://kaikki.org/dictionary/rawdata.html
+    :param wiktextract_data_path: the path of the wiktextract jsonl file. Can be downloaded from
+        https://kaikki.org/dictionary/rawdata.html
     """
     import json
+
     from wiktionary.wiktextract.parse import get_audios
     from wiktionary.wiktextract.parse import get_definitions
 
@@ -40,8 +42,7 @@ def extract_data(wiktextract_data_path: str):
         open("old-persian-wiktextract-data.jsonl", "w") as old_persian,
         open("akkadian-wiktextract-data.jsonl", "w") as akkadian,
         open("elamite-wiktextract-data.jsonl", "w") as elamite,
-        open("sanskrit-wiktextract-data.jsonl", "w") as sanskrit
-    ):
+        open("sanskrit-wiktextract-data.jsonl", "w") as sanskrit):
         for line in data:
             vocabulary = json.loads(line)
             if "lang" in vocabulary:
@@ -63,25 +64,39 @@ def extract_data(wiktextract_data_path: str):
                 )
                 german.write("\n")
             if vocabulary["lang"] == "Latin":
-                latin.write(json.dumps({"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}))
+                latin.write(json.dumps(
+                    {"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}
+                ))
                 latin.write("\n")
             if vocabulary["lang"] == "Ancient Greek":
-                ancient_greek.write(json.dumps({"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}))
+                ancient_greek.write(json.dumps(
+                    {"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}
+                ))
                 ancient_greek.write("\n")
             if vocabulary["lang"] == "Korean":
-                korean.write(json.dumps({"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}))
+                korean.write(json.dumps(
+                    {"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}
+                ))
                 korean.write("\n")
             if vocabulary["lang"] == "Old Persian":
-                old_persian.write(json.dumps({"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}))
+                old_persian.write(json.dumps(
+                    {"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}
+                ))
                 old_persian.write("\n")
             if vocabulary["lang"] == "Akkadian":
-                akkadian.write(json.dumps({"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}))
+                akkadian.write(json.dumps(
+                    {"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}
+                ))
                 akkadian.write("\n")
             if vocabulary["lang"] == "Elamite":
-                elamite.write(json.dumps({"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}))
+                elamite.write(json.dumps(
+                    {"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}
+                ))
                 elamite.write("\n")
             if vocabulary["lang"] == "Sanskrit":
-                sanskrit.write(json.dumps({"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}))
+                sanskrit.write(json.dumps(
+                    {"term": term, "part of speech": pos, "definitions": definitions, "audios": audios}
+                ))
                 sanskrit.write("\n")
 
 
@@ -90,6 +105,7 @@ LANGUAGES = ["German", "Latin", "Ancient Greek", "Korean", "Old Persian", "Akkad
 
 def extract_graph(wiktextract_data_path: str):
     import json
+
     from wiktionary.wiktextract.parse import get_definitions
 
     with (open(wiktextract_data_path) as data, open("word-definition-graph-data.jsonl", "w") as graph):
```
wiktionary/wiktextract/german.py
CHANGED

```diff
@@ -16,6 +16,7 @@ from wiktionary.wiktextract.parse import get_part_of_speech
 
 GERMAN_CASES = ["nominative", "genitive", "dative", "accusative"]
 
+
 def get_gender_modifier(vocabulary) -> str:
     if get_part_of_speech(vocabulary) == "noun" and "categories" in vocabulary:
         if "German masculine nouns" in vocabulary["categories"]:
@@ -103,7 +104,6 @@ def get_german_adj_declensions(vocabulary) -> dict:
     return conjugations
 
 
-
 def get_german_noun_declensions(vocabulary):
     if "forms" not in vocabulary:
         return {}
```
wiktionary/wiktextract/parse.py
CHANGED

```diff
@@ -15,8 +15,10 @@
 def get_part_of_speech(vocabulary) -> str:
     return vocabulary["pos"] if "pos" in vocabulary else "Unknown"
 
+
 def get_definitions(vocabulary) -> list:
-    return [sense["glosses"][0] if "glosses" in sense else sense["raw_glosses"][0] for sense in vocabulary["senses"] if "glosses" in sense or "raw_glosses" in sense]
+    return [sense["glosses"][0] if "glosses" in sense else sense["raw_glosses"][0] for sense in vocabulary["senses"] if
+            "glosses" in sense or "raw_glosses" in sense]
 
 
 def get_audios(vocabulary) -> list:
```