FiendHunter committed
Commit • 2706a1d
1 Parent(s): 10b6f81
Upload folder using huggingface_hub
Browse files
- README.md +1 -0
- scripts/.gitignore +4 -0
- scripts/Data_fetch/github_parser.py +57 -0
- scripts/Data_fetch/list_file_types.py +66 -0
- scripts/Data_fetch/local_parser.py +47 -0
- scripts/Data_fetch/ocr.py +60 -0
- scripts/Data_fetch/pdf_parser.py +60 -0
- scripts/Data_fetch/readme.md +5 -0
- scripts/Data_fetch/repo2text_parser.py +101 -0
- scripts/Data_fetch/repopack_parser.py +84 -0
- scripts/Helper/chunking.py +32 -0
- scripts/Helper/cleaner.py +11 -0
- scripts/Helper/count.py +28 -0
- scripts/Helper/counter.py +50 -0
- scripts/Helper/local_csv.py +31 -0
- scripts/Summary/checkpoint.py +196 -0
- scripts/Summary/qna_only.py +120 -0
- scripts/Summary/readme.md +9 -0
- scripts/Summary/summarizer(openai).py +116 -0
- scripts/Summary/summarizer.py +135 -0
- scripts/Summary/summarizer_old.py +127 -0
- scripts/Summary/summary_only.py +56 -0
- scripts/readme.md +3 -0
- scripts/upload.py +8 -0
README.md
ADDED
@@ -0,0 +1 @@
This is the repository for LFX Mentorship Term-3 2024 for the project "Create an LLM app with deep understanding of a GitHub repo" under CNCF - WasmEdge
scripts/.gitignore
ADDED
@@ -0,0 +1,4 @@
Helper
*.csv
checkpoint.py
upload.py
scripts/Data_fetch/github_parser.py
ADDED
@@ -0,0 +1,57 @@
import requests
import csv


GITHUB_TOKEN = ""  # Replace with your actual GitHub token

def get_github_contents(repo_url):
    parts = repo_url.rstrip('/').split('/')
    user = parts[-2]
    repo = parts[-1]

    api_url = f"https://api.github.com/repos/{user}/{repo}/contents/"

    headers = {
        "Authorization": f"token {GITHUB_TOKEN}"
    }

    response = requests.get(api_url, headers=headers)
    response.raise_for_status()

    return response.json()

def process_contents(contents, paths=[], parent_path=""):
    headers = {
        "Authorization": f"token {GITHUB_TOKEN}"
    }

    for item in contents:
        path = parent_path + item['name']
        if item['type'] == 'dir':
            dir_response = requests.get(item['url'], headers=headers)
            dir_response.raise_for_status()
            dir_contents = dir_response.json()
            process_contents(dir_contents, paths, path + "/")
        else:
            file_response = requests.get(item['download_url'], headers=headers)
            file_response.raise_for_status()
            file_content = file_response.text
            paths.append({"Path": path, "Content": file_content})

    return paths

def write_to_csv(data, output_file):
    with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
        fieldnames = ['Path', 'Content']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()
        for row in data:
            writer.writerow(row)

if __name__ == "__main__":
    repo_url = input("Enter GitHub repository URL: ")
    contents = get_github_contents(repo_url)
    paths = process_contents(contents)
    write_to_csv(paths, "quickjs_all.csv")
    print("CSV file 'quickjs_all.csv' generated successfully.")
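For context, each entry returned by the GitHub contents API (which `process_contents` above iterates over) carries at least the fields used here. The snippet below is an illustrative, trimmed-down shape with made-up values, not a captured response:

```python
# Illustrative item from GET /repos/{user}/{repo}/contents/ (trimmed; values are examples).
example_item = {
    "name": "README.md",       # file or directory name, appended to the parent path
    "type": "file",            # "file" or "dir" -- "dir" triggers the recursive call
    "url": "https://api.github.com/repos/<user>/<repo>/contents/README.md",
    "download_url": "https://raw.githubusercontent.com/<user>/<repo>/main/README.md",
}
```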
scripts/Data_fetch/list_file_types.py
ADDED
@@ -0,0 +1,66 @@
import requests
import mimetypes
from collections import defaultdict
import os

GITHUB_TOKEN = ""  # Replace with your actual GitHub personal access token

def get_github_contents(repo_url):
    parts = repo_url.rstrip('/').split('/')
    user = parts[-2]
    repo = parts[-1]

    api_url = f"https://api.github.com/repos/{user}/{repo}/contents/"

    headers = {
        "Authorization": f"token {GITHUB_TOKEN}"
    }

    response = requests.get(api_url, headers=headers)
    response.raise_for_status()

    return response.json()

def list_file_extensions(contents, file_extensions=None, parent_path=""):
    if file_extensions is None:
        file_extensions = defaultdict(int)

    for item in contents:
        path = parent_path + item['name']

        if item['type'] == 'dir':
            dir_url = item['url']
            headers = {
                "Authorization": f"token {GITHUB_TOKEN}"
            }
            dir_response = requests.get(dir_url, headers=headers)

            if dir_response.status_code == 200:
                dir_contents = dir_response.json()
                if isinstance(dir_contents, list):
                    list_file_extensions(dir_contents, file_extensions, path + "/")
                else:
                    print(f"Unexpected directory contents at {path}: {dir_contents}")
            else:
                print(f"Failed to fetch directory contents at {path}. Status code: {dir_response.status_code}")
        else:
            # Get the file extension
            _, file_extension = os.path.splitext(item['name'])

            if file_extension:
                file_extensions[file_extension] += 1
            else:
                file_extensions["no_extension"] += 1

    return file_extensions

def get_file_extensions_in_repo(repo_url):
    contents = get_github_contents(repo_url)
    file_extensions = list_file_extensions(contents)
    return dict(file_extensions)

if __name__ == "__main__":
    repo_url = input("Enter GitHub repository URL: ")
    file_extensions = get_file_extensions_in_repo(repo_url)
    for extension, count in file_extensions.items():
        print(f"{extension}: {count} files")
scripts/Data_fetch/local_parser.py
ADDED
@@ -0,0 +1,47 @@
import os
import csv
import sys

csv.field_size_limit(10**9)

def process_local_repo(repo_path, paths=[]):
    for root, dirs, files in os.walk(repo_path):
        for file in files:
            file_path = os.path.join(root, file)
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                file_content = f.read()
            relative_path = os.path.relpath(file_path, repo_path)
            extension = os.path.splitext(relative_path)[1]
            if extension == '.md':
                formatted_content = f"The following is a markdown document located at {relative_path}\n------\n{file_content}\n------"
            elif extension == '.rs':
                formatted_content = f"```rust:{relative_path}\n{file_content}\n```"
            elif extension == '.css':
                formatted_content = f"```css:{relative_path}\n{file_content}\n```"
            elif extension == '.html':
                formatted_content = f"```html:{relative_path}\n{file_content}\n```"
            elif extension == '.json':
                formatted_content = f"```json:{relative_path}\n{file_content}\n```"
            elif extension == '.sh':
                formatted_content = f"```bash:{relative_path}\n{file_content}\n```"
            elif extension == '.py':
                formatted_content = f"```python:{relative_path}\n{file_content}\n```"
            elif extension == '.js':
                formatted_content = f"```javascript:{relative_path}\n{file_content}\n```"
            else:
                formatted_content = f"The following document is located at {relative_path}\n------\n{file_content}\n------"
            paths.append({"FormattedContent": formatted_content})
    return paths

def write_to_csv(data, output_file):
    with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        for row in data:
            writer.writerow([row['FormattedContent']])

if __name__ == "__main__":
    repo_path = input("Enter the local repository path: ")
    paths = process_local_repo(repo_path)
    output_csv = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\output\local_repo\docs\qucik_js_js.csv"
    write_to_csv(paths, output_csv)
    print(f"CSV file '{output_csv}' generated successfully.")
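To make the formatting convention concrete, the single-column row written for a hypothetical Rust source file would contain a value like the one below (an illustrative example, not output from a real run):

```python
# Hypothetical example of one formatted row produced by process_local_repo for a .rs file.
example_row = {"FormattedContent": "```rust:src/main.rs\nfn main() {\n    println!(\"hello\");\n}\n```"}
```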
scripts/Data_fetch/ocr.py
ADDED
@@ -0,0 +1,60 @@
import requests
import csv
import mimetypes
from io import BytesIO
from PIL import Image
from surya.ocr import run_ocr
from surya.model.detection.model import load_model as load_det_model, load_processor as load_det_processor
from surya.model.recognition.model import load_model as load_rec_model
from surya.model.recognition.processor import load_processor as load_rec_processor

def get_github_contents(repo_url):
    parts = repo_url.rstrip('/').split('/')
    user = parts[-2]
    repo = parts[-1]

    api_url = f"https://api.github.com/repos/{user}/{repo}/contents/"

    response = requests.get(api_url)
    response.raise_for_status()

    return response.json()

def process_contents(contents, paths=[], parent_path=""):
    langs = ["en"]
    det_processor, det_model = load_det_processor(), load_det_model()
    rec_model, rec_processor = load_rec_model(), load_rec_processor()

    for item in contents:
        path = parent_path + item['name']
        if item['type'] == 'dir':
            dir_contents = requests.get(item['url']).json()
            process_contents(dir_contents, paths, path + "/")
        else:
            mime_type, _ = mimetypes.guess_type(item['name'])
            if mime_type and mime_type.split('/')[0] == 'image':
                image_content = requests.get(item['download_url']).content
                image = Image.open(BytesIO(image_content))
                predictions = run_ocr([image], [langs], det_model, det_processor, rec_model, rec_processor)
                # Note: the OCR predictions are computed but not written out; only the image
                # path is recorded and the content column is left empty.
                paths.append({"path": path, "content": ""})

    return paths

def write_to_csv(data, output_file):
    if data:
        with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
            fieldnames = ['path', 'content']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

            writer.writeheader()
            for row in data:
                writer.writerow(row)
    else:
        print("No data to write to CSV.")

if __name__ == "__main__":
    repo_url = input("Enter GitHub repository URL: ")
    contents = get_github_contents(repo_url)
    paths = process_contents(contents)
    write_to_csv(paths, "repo_ocr.csv")
    print(f"CSV file 'repo_ocr.csv' generated successfully with {len(paths)} entries.")
scripts/Data_fetch/pdf_parser.py
ADDED
@@ -0,0 +1,60 @@
import requests
import csv
import os
import mimetypes
import PyPDF2

def get_github_contents(repo_url):
    parts = repo_url.rstrip('/').split('/')
    user = parts[-2]
    repo = parts[-1]
    api_url = f"https://api.github.com/repos/{user}/{repo}/contents/"
    response = requests.get(api_url)
    response.raise_for_status()

    return response.json()

def process_contents(contents, paths=[], parent_path=""):
    for item in contents:
        path = parent_path + item['name']
        if item['type'] == 'dir':
            dir_contents = requests.get(item['url']).json()
            process_contents(dir_contents, paths, path + "/")
        else:
            extension = '.' + item['name'].split('.')[-1] if '.' in item['name'] else ''
            if extension == '.pdf':
                file_response = requests.get(item['download_url'])
                pdf_path = "Output/" + item['name']
                # Save the PDF locally
                with open(pdf_path, 'wb') as f:
                    f.write(file_response.content)
                paths.append({"path": pdf_path, "content": file_response.content})
    return paths

def extract_text_from_pdf(pdf_file_path):
    pdf_reader = PyPDF2.PdfReader(pdf_file_path)
    pages_content = []
    for page_num in range(len(pdf_reader.pages)):
        page = pdf_reader.pages[page_num]
        text = page.extract_text()
        pages_content.append(text)
    return pages_content

def save_pages_to_csv(pages_content, output_csv_file):
    with open(output_csv_file, 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(["Page", "Content"])
        for i, content in enumerate(pages_content):
            writer.writerow([i + 1, content])

if __name__ == "__main__":
    repo_url = input("Enter GitHub repository URL: ")
    contents = get_github_contents(repo_url)
    paths = process_contents(contents)

    for pdf_data in paths:
        pdf_file_path = pdf_data["path"]
        print(f"Processing {pdf_file_path}")
        pages_content = extract_text_from_pdf(pdf_file_path)
        csv_output_path = pdf_file_path.replace('.pdf', '_pages.csv')
        save_pages_to_csv(pages_content, csv_output_path)
        print(f"Extracted content from {pdf_file_path} and saved to {csv_output_path}")
    print("All PDF files have been processed and converted to CSV.")
scripts/Data_fetch/readme.md
ADDED
@@ -0,0 +1,5 @@
1. List_file_types lists the various file types present in a repository, which is useful for knowing what kinds of files it contains.
2. Repo_parser copies the entire contents of a repository into a CSV file, with each file's path in one column and its contents in another.
3. Code_parser copies only the files that contain code in specific languages, which is useful when a particular language or framework is targeted.
4. OCR converts images and other files that carry text in the background into text data.
5. PDF_parser fetches PDF files and converts them into a CSV file with path, page number, and contents as columns.
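As a rough illustration of how these fetchers are used (this sketch assumes github_parser.py above is importable from the same folder; the repository URL and output filename are placeholder examples, not values from this project):

```python
# Minimal sketch: pull a repository's files via the GitHub contents API and dump them to CSV.
from github_parser import get_github_contents, process_contents, write_to_csv

repo_url = "https://github.com/example-user/example-repo"  # placeholder URL
contents = get_github_contents(repo_url)    # top-level listing from the contents API
rows = process_contents(contents)           # recursive walk -> [{"Path": ..., "Content": ...}, ...]
write_to_csv(rows, "example_repo.csv")      # one row per file, columns: Path, Content
```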
scripts/Data_fetch/repo2text_parser.py
ADDED
@@ -0,0 +1,101 @@
import csv
import os

def parse_text_file(input_file):
    data = []
    current_path = None
    current_content = []

    with open(input_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    i = 0
    while i < len(lines):
        line = lines[i].strip()

        # Check for file start marker
        if line == '---' and i + 1 < len(lines) and 'File:' in lines[i + 1]:
            # If we have existing content, save it
            if current_path and current_content:
                data.append({
                    "Path": current_path,
                    "Content": ''.join(current_content)
                })
                current_content = []

            # Extract the new file path
            current_path = lines[i + 1].split('File:')[1].strip()
            i += 2  # Skip the File: line

            # Skip the closing '---' of the file header if it exists
            if i < len(lines) and lines[i].strip() == '---':
                i += 1

            continue

        # If we have a current path, collect all content including front matter
        if current_path:
            current_content.append(lines[i] + '\n')

        i += 1

    # Don't forget to add the last file
    if current_path and current_content:
        data.append({
            "Path": current_path,
            "Content": ''.join(current_content)
        })

    return data

def transform_and_write_csv(data, output_csv):
    with open(output_csv, mode='w', newline='', encoding='utf-8') as outfile:
        writer = csv.writer(outfile)
        for row in data:
            path = row.get('Path')
            if not path:
                continue

            content = row.get('Content', '')
            extension = os.path.splitext(path)[1].lower()

            if extension == '.md':
                new_content = f"The following is a markdown document located at {path}\n------\n{content}\n------"
            elif extension == '.rs':
                new_content = f"```rust:{path}\n{content}\n```"
            elif extension == '.sh':
                new_content = f"```bash:{path}\n{content}\n```"
            elif extension == '.py':
                new_content = f"```python:{path}\n{content}\n```"
            elif extension == '.js':
                new_content = f"```javascript:{path}\n{content}\n```"
            elif extension == '.json':
                new_content = f"```json:{path}\n{content}\n```"
            elif extension == '.txt':
                new_content = f"The following is a plain text file located at {path}\n------\n{content}\n------"
            elif extension == '.toml':
                new_content = f"```toml:{path}\n{content}\n```"
            elif extension == '.jsx':
                new_content = f"```jsx:{path}\n{content}\n```"
            elif extension == '.css':
                new_content = f"```css:{path}\n{content}\n```"
            elif extension == '.java':
                new_content = f"```java:{path}\n{content}\n```"
            elif extension == '.hpp':
                new_content = f"```hpp:{path}\n{content}\n```"
            elif extension == '.c':
                new_content = f"```c:{path}\n{content}\n```"
            elif extension == '.yml':
                new_content = f"```yml:{path}\n{content}\n```"
            elif extension == '.xml':
                new_content = f"```xml:{path}\n{content}\n```"
            else:
                new_content = f"The following document is located at {path}\n------\n{content}\n------"
            writer.writerow([new_content])

if __name__ == "__main__":
    input_file = input("Enter the path of your text file: ")
    final_output_csv = "wasmedge.csv"
    parsed_data = parse_text_file(input_file)
    transform_and_write_csv(parsed_data, final_output_csv)
    print(f"Transformed CSV file '{final_output_csv}' generated successfully.")
scripts/Data_fetch/repopack_parser.py
ADDED
@@ -0,0 +1,84 @@
import csv
import os

def parse_text_file(input_file):
    data = []
    current_path = None
    current_content = []

    with open(input_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        inside_file_block = False

        for line in lines:
            if line.strip() == '================':
                if inside_file_block and current_path and current_content:
                    data.append({
                        "Path": current_path.strip(),
                        "Content": ''.join(current_content).strip()
                    })
                    current_path = None
                    current_content = []
                    inside_file_block = False
            elif line.startswith('File:'):
                current_path = line.split('File: ')[1].strip()
                inside_file_block = True
            else:
                if inside_file_block:
                    current_content.append(line)

    if current_path and current_content:
        data.append({
            "Path": current_path.strip(),
            "Content": ''.join(current_content).strip()
        })
    return data

def transform_and_write_csv(data, output_csv):
    with open(output_csv, mode='w', newline='', encoding='utf-8') as outfile:
        writer = csv.writer(outfile)
        for row in data:
            path = row['Path']
            content = row['Content']
            extension = os.path.splitext(path)[1]

            if extension == '.md':
                new_content = f"The following is a markdown document located at {path}\n------\n{content}\n------"
            elif extension == '.rs':
                new_content = f"```rust:{path}\n{content}\n```"
            elif extension == '.sh':
                new_content = f"```bash:{path}\n{content}\n```"
            elif extension == '.py':
                new_content = f"```python:{path}\n{content}\n```"
            elif extension == '.js':
                new_content = f"```javascript:{path}\n{content}\n```"
            elif extension == '.json':
                new_content = f"```json:{path}\n{content}\n```"
            elif extension == '.txt':
                new_content = f"The following is a plain text file located at {path}\n------\n{content}\n------"
            elif extension == '.toml':
                new_content = f"```toml:{path}\n{content}\n```"
            elif extension == '.jsx':
                new_content = f"```jsx:{path}\n{content}\n```"
            elif extension == '.css':
                new_content = f"```css:{path}\n{content}\n```"
            elif extension == '.java':
                new_content = f"```java:{path}\n{content}\n```"
            elif extension == '.hpp':
                new_content = f"```hpp:{path}\n{content}\n```"
            elif extension == '.c':
                new_content = f"```c:{path}\n{content}\n```"
            elif extension == '.yml':
                new_content = f"```yml:{path}\n{content}\n```"
            elif extension == '.xml':
                new_content = f"```xml:{path}\n{content}\n```"
            else:
                new_content = f"The following document is located at {path}\n------\n{content}\n------"
            writer.writerow([new_content])

if __name__ == "__main__":
    input_file = input("Enter the path of your text file: ")
    final_output_csv = "wasmedge.csv"
    parsed_data = parse_text_file(input_file)
    transform_and_write_csv(parsed_data, final_output_csv)
    print(f"Transformed CSV file '{final_output_csv}' generated successfully.")
scripts/Helper/chunking.py
ADDED
@@ -0,0 +1,32 @@
import pandas as pd

df = pd.read_csv('Output/summary/eth_md_summary.csv')

def truncate_content(content, max_tokens=7000):
    words = content.split()
    return ' '.join(words[:max_tokens])

df['Content'] = df['Content'].apply(lambda x: truncate_content(x))
df['Summary and Q&A'] = df['Summary and Q&A'].apply(lambda x: truncate_content(x))

df.to_csv('Output/summary/eth_md_summary_trun.csv', index=False)

'''
import pandas as pd

df = pd.read_csv('input_file.csv')

def split_content(row, max_words=5000):
    content = row['Content']
    words = content.split()
    chunks = [words[i:i + max_words] for i in range(0, len(words), max_words)]
    return [{'Path': row['Path'], 'Content': ' '.join(chunk)} for chunk in chunks]

new_rows = []

for index, row in df.iterrows():
    new_rows.extend(split_content(row))

new_df = pd.DataFrame(new_rows)

new_df.to_csv('output_file.csv', index=False)
'''
scripts/Helper/cleaner.py
ADDED
@@ -0,0 +1,11 @@
import pandas as pd

file_path = '/home/aru/Desktop/Github_analyser/Output/split_summary/wasmedge_split.csv'
df = pd.read_csv(file_path)

df_cleaned = df.dropna(subset=['Content'])

output_file_path = '/home/aru/Desktop/Github_analyser/Output/split_summary/wasmedge_split_cleam.csv'
df_cleaned.to_csv(output_file_path, index=False)

print("Rows with empty 'Content' have been removed.")
scripts/Helper/count.py
ADDED
@@ -0,0 +1,28 @@
import csv

def count_words_in_csv(input_file, output_file):
    # Set the CSV field size limit to a large value, avoiding the overflow issue
    csv.field_size_limit(10**7)  # 10 million, a high but safe limit

    # Open the input file with utf-8 encoding
    with open(input_file, 'r', encoding='utf-8') as infile:
        reader = csv.reader(infile)

        # Prepare the data for the output file
        output_data = [["Original Content", "Word Count"]]

        # Iterate through each row, count words, and add to output data
        for row in reader:
            content = row[0]  # Get the content from the single column
            word_count = len(content.split())  # Count the words
            output_data.append([content, word_count])

    # Write the output data to the output CSV file
    with open(output_file, 'w', newline='', encoding='utf-8') as outfile:
        writer = csv.writer(outfile)
        writer.writerows(output_data)

    print(f"Word count added to '{output_file}' successfully.")

# Usage example
count_words_in_csv(r"C:\Users\91745\OneDrive\Desktop\Github_analyser\output\local_repo\docs\quick_js_js.csv", "quickjsjs_count.csv")
scripts/Helper/counter.py
ADDED
@@ -0,0 +1,50 @@
import pandas as pd

# Read the CSV, forcing it into a single column by specifying an unusual separator
df = pd.read_csv(r'C:\Users\91745\OneDrive\Desktop\Github_analyser\output\local_repo\final_repo\llamaedge_repopack.csv', sep='\n', header=None)

# Rename the column to 'Content'
df.columns = ['Content']

# Define the word count function
def count_words(text):
    if isinstance(text, str):
        return len(text.split())
    else:
        return 0

# Apply the word count function and add the result as a new column
df['Content_Word_Count'] = df['Content'].apply(count_words)

# Write to a new CSV without headers
df.to_csv('wasmedge_quickjs.csv', index=False, header=False)


'''
import pandas as pd
from transformers import AutoModel

model = AutoModel.from_pretrained("Xenova/gpt-4")

tokenizer = GPT2TokenizerFast.from_pretrained('Xenova/gpt-4')


df = pd.read_csv('/home/aru/Desktop/Github_analyser/Output/summary/eth_md_summary.csv')

def count_words(text):
    return len(text.split())

def count_tokens(text):
    tokens = tokenizer.encode(text)
    return len(tokens)

df['Content_Word_Count'] = df['Content'].apply(count_words)
df['Summary_QnA_Word_Count'] = df['Summary and Q&A'].apply(count_words)

df['Content_Token_Count'] = df['Content'].apply(count_tokens)
df['Summary_QnA_Token_Count'] = df['Summary and Q&A'].apply(count_tokens)

df.to_csv('output_file.csv', index=False)
'''
scripts/Helper/local_csv.py
ADDED
@@ -0,0 +1,31 @@
import csv
import os
import sys

csv.field_size_limit(10**9)

input_csv = r''
output_csv = r''

with open(input_csv, mode='r', newline='', encoding='utf-8') as infile, \
     open(output_csv, mode='w', newline='', encoding='utf-8') as outfile:

    reader = csv.DictReader(infile)
    writer = csv.writer(outfile)

    for row in reader:
        path = row['Path']
        content = row['Content']
        extension = os.path.splitext(path)[1]
        if extension == '.md':
            new_content = f"The following is a markdown document located at {path}\n------\n{content}\n------"
        elif extension == '.rs':
            new_content = f"```rust:{path}\n{content}\n```"
        elif extension == '.sh':
            new_content = f"```bash:{path}\n{content}\n```"
        elif extension == '.py':
            new_content = f"```python:{path}\n{content}\n```"
        else:
            new_content = f"The following document is located at {path}\n------\n{content}\n------"
        writer.writerow([new_content])
print(f"Transformed CSV has been written to {output_csv}")
scripts/Summary/checkpoint.py
ADDED
@@ -0,0 +1,196 @@
import openai
import csv
import os
import json
import time
from datetime import datetime
csv.field_size_limit(10**9)

API_BASE_URL = "https://llama.us.gaianet.network/v1"
MODEL_NAME = "llama"
API_KEY = "GAIA"

def save_checkpoint(checkpoint_file, processed_row, processed_contents):
    checkpoint_data = {
        'last_processed_row': processed_row,
        'processed_contents': list(processed_contents)
    }
    with open(checkpoint_file, 'w') as f:
        json.dump(checkpoint_data, f)

def load_checkpoint(checkpoint_file):
    if os.path.exists(checkpoint_file):
        with open(checkpoint_file, 'r') as f:
            checkpoint_data = json.load(f)
        return (
            checkpoint_data['last_processed_row'],
            set(checkpoint_data['processed_contents'])
        )
    return 0, set()

def create_backup(file_path):
    """Create a backup of the output file with timestamp"""
    if os.path.exists(file_path):
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        backup_path = f"{file_path}.{timestamp}.bak"
        os.rename(file_path, backup_path)
        print(f"Created backup at: {backup_path}")

def summarize(source_text, max_retries=3):
    for attempt in range(max_retries):
        try:
            client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
            chat_completion = client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": """
                        You are an AI assistant designed to review pull requests (PRs) in GitHub repositories. Your task is to:

                        1. Summarize Code-related Files:
                        - Focus on key changes in the code, including additions, deletions, and modifications.
                        - Capture essential details such as the purpose of the code, any new functions, classes, or methods, and the overall impact of these changes on the project.
                        - Highlight any dependencies, error handling, or performance implications.

                        2. Summarize Markdown Files:
                        - Extract key points from documentation, readme files, and other markdown content.
                        - Identify sections related to project setup, usage instructions, change logs, or contributor guidelines.
                        - Note updates in the documentation and the implications for users or developers.
                        """,
                    },
                    {
                        "role": "user",
                        "content": source_text,
                    }
                ],
                model=MODEL_NAME,
                stream=False,
            )
            return chat_completion.choices[0].message.content
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying...")
            time.sleep(2 ** attempt)

def qgen(source_text, max_retries=3):
    for attempt in range(max_retries):
        try:
            client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
            chat_completion = client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "Respond with a list of 10 questions. The text in the user message must contain specific answers to each question. Each question must be on its own line. Just list the questions without any introductory text or numbers.",
                    },
                    {
                        "role": "user",
                        "content": source_text,
                    }
                ],
                model=MODEL_NAME,
                stream=False,
            )
            return chat_completion.choices[0].message.content
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying...")
            time.sleep(2 ** attempt)

def agen(source_text, question, max_retries=3):
    for attempt in range(max_retries):
        try:
            client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
            chat_completion = client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "Give a comprehensive and well-reasoned answer to the user question strictly based on the context below and try to give a detailed explanation while answering the questions. Also try to add some bonus tip to in each answer and some relevant example outside of the content.\n" + source_text
                    },
                    {
                        "role": "user",
                        "content": question,
                    }
                ],
                model=MODEL_NAME,
                stream=False,
            )
            return chat_completion.choices[0].message.content
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying...")
            time.sleep(2 ** attempt)

def main():
    input_path = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\output\local_repo\docs\quick_js_js.csv"
    output_path = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\output\local_repo\summary\quick_js_js.csv"
    checkpoint_file = output_path + '.checkpoint'

    last_processed_row, processed_contents = load_checkpoint(checkpoint_file)

    if last_processed_row == 0 and os.path.exists(output_path):
        create_backup(output_path)

    row_count = last_processed_row

    try:
        with open(input_path, 'r', newline='', encoding='utf-8') as infile, \
             open(output_path, 'a', newline='', encoding='utf-8') as outfile:

            csv_reader = csv.reader(infile)
            csv_writer = csv.writer(outfile)

            for _ in range(last_processed_row):
                next(csv_reader)

            for row in csv_reader:
                try:
                    main_content = row[0]

                    if main_content in processed_contents:
                        continue

                    print(f"Processing row {row_count + 1}...")

                    summary = summarize(main_content)
                    qs = qgen(main_content)
                    qna_list = []

                    for q in qs.splitlines():
                        if len(q.strip()) == 0:
                            continue
                        answer = agen(main_content, q)
                        qna_list.append(f"Q: {q}\nA: {answer}")

                    csv_writer.writerow([main_content, f"Summary:\n{summary}"])
                    for qna in qna_list:
                        csv_writer.writerow([main_content, qna])

                    processed_contents.add(main_content)
                    row_count += 1

                    save_checkpoint(checkpoint_file, row_count, processed_contents)

                    print(f"Successfully processed row {row_count}")

                except Exception as e:
                    print(f"Error processing row {row_count + 1}: {str(e)}")
                    # Save checkpoint before raising the error
                    save_checkpoint(checkpoint_file, row_count, processed_contents)
                    raise

            print(f"Processing completed. Modified data has been written to {output_path}")
            print(f"Total rows processed: {row_count}")

            if os.path.exists(checkpoint_file):
                os.remove(checkpoint_file)

    except Exception as e:
        print(f"An error occurred: {str(e)}")
        print(f"Progress has been saved. You can resume from row {row_count + 1}")
        raise

if __name__ == "__main__":
    main()
scripts/Summary/qna_only.py
ADDED
@@ -0,0 +1,120 @@
import openai
import csv
import sys
import os
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

API_BASE_URL = "https://llama.us.gaianet.network/v1"
MODEL_NAME = "llama"
API_KEY = "GAIA"

client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)

def qgen(source_text):
    try:
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": "Respond with a list of 10 questions. The text in the user message must contain specific answers to each question. Each question must be complete without references to unclear context such as 'this team' or 'that lab'. Each question must be on its own line. Just list the questions without any introductory text or numbers.",
                },
                {
                    "role": "user",
                    "content": source_text,
                }
            ],
            model=MODEL_NAME,
            stream=False,
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        logging.error(f"Error in generating questions: {e}")
        return None

def agen(source_text, question):
    try:
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": "Give a comprehensive and well-reasoned answer to the user question strictly based on the context below.\n" + source_text,
                },
                {
                    "role": "user",
                    "content": question,
                }
            ],
            model=MODEL_NAME,
            stream=False,
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        logging.error(f"Error in generating answer: {e}")
        return None

def process_csv(input_csv, output_csv):
    results = []
    processed_contents = set()

    if os.path.exists(output_csv):
        with open(output_csv, 'r', newline='', encoding='utf-8') as outfile:
            reader = csv.reader(outfile)
            for row in reader:
                processed_contents.add(row[0])

    try:
        with open(input_csv, 'r', newline='', encoding='utf-8') as csvfile_in, \
             open(output_csv, 'a', newline='', encoding='utf-8') as csvfile_out:

            csv_reader = csv.DictReader(csvfile_in)
            fieldnames = ['Content', 'Summary and Q&A']
            writer = csv.DictWriter(csvfile_out, fieldnames=fieldnames)

            if not os.path.exists(output_csv) or os.stat(output_csv).st_size == 0:
                writer.writeheader()

            for row in csv_reader:
                main_content = row['Content']
                summary = row['Summary']

                if main_content in processed_contents:
                    logging.info(f"Skipping already processed content: {main_content}")
                    continue

                questions = qgen(main_content)
                if questions is None:
                    logging.error(f"Skipping content due to question generation failure: {main_content}")
                    continue

                question_list = questions.splitlines()
                result = [{"Content": main_content, "Summary and Q&A": f"Summary:\n{summary}"}]

                for question in question_list:
                    if len(question.strip()) == 0:
                        continue
                    answer = agen(main_content, question)
                    if answer is None:
                        logging.error(f"Skipping question due to answer generation failure: {question}")
                        continue
                    result.append({"Content": main_content, "Summary and Q&A": f"Q: {question}\nA: {answer}"})

                for res in result:
                    writer.writerow(res)
                    csvfile_out.flush()

                logging.info(f"Processed and saved content: {main_content}")

    except Exception as e:
        logging.error(f"Error processing CSV: {e}")

if __name__ == "__main__":
    if len(sys.argv) != 3:
        logging.error("Usage: python script.py <input_csv> <output_csv>")
        sys.exit(1)

    input_csv_file = sys.argv[1]
    output_csv_file = sys.argv[2]

    process_csv(input_csv_file, output_csv_file)
scripts/Summary/readme.md
ADDED
@@ -0,0 +1,9 @@
1. code_summarizer.py takes as input a CSV from the output folder that has path and content columns, summarizes it, and creates a new CSV with content and summary as headers.

2. qna.py generates a new summary file from the above summary file, which can be used to generate multiple vector embeddings from a single file.

3. openai_summary.py combines the above two steps and generates the summary file directly from the raw CSV file, but it uses the OpenAI API.

4. final_split_summary.py does the same job but uses Gaianet endpoints instead of the OpenAI API.

The key difference between the last two scripts is whether they call APIs backed by open-source models or proprietary models.
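All of these summarizers share the same OpenAI-compatible chat-completions call; a minimal sketch of that shared pattern follows (the base URL, model name, and key mirror the constants in summarizer.py, while the prompt and input text here are placeholder examples):

```python
# Minimal sketch of the chat-completion call pattern used by the Summary scripts.
import openai

client = openai.OpenAI(base_url="https://llama.us.gaianet.network/v1", api_key="GAIA")
completion = client.chat.completions.create(
    model="llama",
    messages=[
        {"role": "system", "content": "Summarize the following file."},          # placeholder prompt
        {"role": "user", "content": "```python:example.py\nprint('hi')\n```"},   # placeholder input row
    ],
    stream=False,
)
print(completion.choices[0].message.content)
```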
scripts/Summary/summarizer(openai).py
ADDED
@@ -0,0 +1,116 @@
import csv
import os
import openai
from openai import OpenAI
client = OpenAI(api_key='')
MODEL_NAME = "gpt-4o"

def summarize(source_text):
    chat_completion = client.chat.completions.create(model=MODEL_NAME,
        messages=[
            {
                "role": "system",
                "content": """
                You are an AI assistant designed to review pull requests (PRs) in GitHub repositories. Your task is to:

                1. Summarize Code-related Files:
                - Focus on key changes in the code, including additions, deletions, and modifications.
                - Capture essential details such as the purpose of the code, any new functions, classes, or methods, and the overall impact of these changes on the project.
                - Highlight any dependencies, error handling, or performance implications.

                2. Summarize Markdown Files:
                - Extract key points from documentation, readme files, and other markdown content.
                - Identify sections related to project setup, usage instructions, change logs, or contributor guidelines.
                - Note updates in the documentation and the implications for users or developers.
                """,
            },
            {
                "role": "user",
                "content": source_text,
            }
        ])
    return chat_completion.choices[0].message.content

def qgen(source_text):
    chat_completion = client.chat.completions.create(model=MODEL_NAME,
        messages=[
            {
                "role": "system",
                "content": "Respond with a list of 10 questions. The text in the user message must contain specific answers to each question. Each question must be on its own line. Just list the questions without any introductory text or numbers.",
            },
            {
                "role": "user",
                "content": source_text,
            }
        ])
    return chat_completion.choices[0].message.content

def agen(source_text, question):
    chat_completion = client.chat.completions.create(model=MODEL_NAME,
        messages=[
            {
                "role": "system",
                "content": (
                    "Give a comprehensive and well-reasoned answer to the user question strictly based on the context below "
                    "and try to give a detailed explanation while answering the questions. Also try to add some bonus tip to "
                    "each answer and some relevant example outside of the content.\n\nContext:\n" + source_text
                ),
            },
            {
                "role": "user",
                "content": question,
            }
        ])
    return chat_completion.choices[0].message.content

def main():
    input_path = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\Output\main_repos\gaianet_md_2.csv"
    output_path = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\Output\split_summary\gaianet_md_open_new.csv"
    processed_contents = set()
    output_file_exists = os.path.exists(output_path)

    if output_file_exists:
        with open(output_path, 'r', newline='', encoding='utf-8') as csvfile:
            csv_reader = csv.DictReader(csvfile)
            for row in csv_reader:
                processed_contents.add(row['Content'])

    row_count = 0

    with open(input_path, 'r', newline='', encoding='utf-8') as csvfile_in, \
         open(output_path, 'a', newline='', encoding='utf-8') as csvfile_out:

        csv_reader = csv.DictReader(csvfile_in)
        fieldnames = ["Content", "Summary and Q&A"]
        writer = csv.DictWriter(csvfile_out, fieldnames=fieldnames)

        if not output_file_exists:
            writer.writeheader()

        for row in csv_reader:
            main_content = row['Content']

            if main_content in processed_contents:
                continue

            summary = summarize(main_content)
            qs = qgen(main_content)

            if summary.strip():
                writer.writerow({"Content": main_content, "Summary and Q&A": f"Summary:\n{summary}"})

            for q in qs.strip().split('\n'):
                if q.strip():
                    answer = agen(main_content, q)
                    if answer.strip():
                        writer.writerow({"Content": main_content, "Summary and Q&A": f"Q: {q}\nA: {answer}"})

            processed_contents.add(main_content)
            row_count += 1
            print(f"Processed row {row_count}")

    print(f"Modified data has been written to {output_path}")
    print(f"Total rows summarized: {row_count}")

if __name__ == "__main__":
    main()
scripts/Summary/summarizer.py
ADDED
@@ -0,0 +1,135 @@
import openai
import csv
import os
import time
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
csv.field_size_limit(10**9)

API_BASE_URL = "https://llama.us.gaianet.network/v1"
MODEL_NAME = "llama"
API_KEY = "GAIA"

def create_retry_decorator():
    return retry(
        retry=retry_if_exception_type((openai.APIError, openai.APITimeoutError)),
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        before_sleep=lambda retry_state: print(f"Retry attempt {retry_state.attempt_number} after {retry_state.outcome.exception()}")
    )

@create_retry_decorator()
def make_api_call(client, messages, model):
    return client.chat.completions.create(
        messages=messages,
        model=model,
        stream=False,
    )

def summarize(source_text):
    client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
    messages = [
        {
            "role": "system",
            "content": """
            You are an AI assistant designed to review pull requests (PRs) in GitHub repositories. Your task is to:

            1. Summarize Code-related Files:
            - Focus on key changes in the code, including additions, deletions, and modifications.
            - Capture essential details such as the purpose of the code, any new functions, classes, or methods, and the overall impact of these changes on the project.
            - Highlight any dependencies, error handling, or performance implications.

            2. Summarize Markdown Files:
            - Extract key points from documentation, readme files, and other markdown content.
            - Identify sections related to project setup, usage instructions, change logs, or contributor guidelines.
            - Note updates in the documentation and the implications for users or developers.
            """,
        },
        {
            "role": "user",
            "content": source_text,
        }
    ]
    chat_completion = make_api_call(client, messages, MODEL_NAME)
    return chat_completion.choices[0].message.content

def qgen(source_text):
    client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
    messages = [
        {
            "role": "system",
            "content": "Respond with a list of 10 questions. The text in the user message must contain specific answers to each question. Each question must be on its own line. Just list the questions without any introductory text or numbers.",
        },
        {
            "role": "user",
            "content": source_text,
        }
    ]
    chat_completion = make_api_call(client, messages, MODEL_NAME)
    return chat_completion.choices[0].message.content

def agen(source_text, question):
    client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
    messages = [
        {
            "role": "system",
            "content": "Give a comprehensive and well-reasoned answer to the user question strictly based on the context below and try to give a detailed explanation while answering the questions. Also try to add some bonus tip to in each answer and some relevant example outside of the content.\n" + source_text
        },
        {
            "role": "user",
            "content": question,
        }
    ]
    chat_completion = make_api_call(client, messages, MODEL_NAME)
    return chat_completion.choices[0].message.content

def main():
    input_path = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\output\local_repo\docs\wasmedge_docs.csv"
    output_path = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\output\local_repo\summary\wasmedge_docs_2.csv"
    processed_contents = set()
    output_file_exists = os.path.exists(output_path)

    row_count = 0

    with open(input_path, 'r', newline='', encoding='utf-8') as infile, \
         open(output_path, 'a', newline='', encoding='utf-8') as outfile:

        csv_reader = csv.reader(infile)
        csv_writer = csv.writer(outfile)

        if not output_file_exists:
            pass

        for row in csv_reader:
            try:
                main_content = row[0]

                if main_content in processed_contents:
                    continue

                summary = summarize(main_content)
                qs = qgen(main_content)
                qna_list = []
                for q in qs.splitlines():
                    if len(q.strip()) == 0:
                        continue
                    answer = agen(main_content, q)
                    qna_list.append(f"Q: {q}\nA: {answer}")

                csv_writer.writerow([main_content, f"Summary:\n{summary}"])
                for qna in qna_list:
                    csv_writer.writerow([main_content, qna])

                processed_contents.add(main_content)

                row_count += 1
                print(f"Processed row {row_count}")

            except Exception as e:
                print(f"Error processing row {row_count + 1}: {str(e)}")
                continue

    print(f"Modified data has been written to {output_path}")
    print(f"Total rows summarized: {row_count}")

if __name__ == "__main__":
    main()
scripts/Summary/summarizer_old.py
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import openai
import csv
import os

API_BASE_URL = "https://llama.us.gaianet.network/v1"
MODEL_NAME = "llama"
API_KEY = "GAIA"

def summarize(source_text):
    client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": """
                You are an AI assistant designed to review pull requests (PRs) in GitHub repositories. Your task is to:

                1. Summarize Code-related Files:
                   - Focus on key changes in the code, including additions, deletions, and modifications.
                   - Capture essential details such as the purpose of the code, any new functions, classes, or methods, and the overall impact of these changes on the project.
                   - Highlight any dependencies, error handling, or performance implications.

                2. Summarize Markdown Files:
                   - Extract key points from documentation, readme files, and other markdown content.
                   - Identify sections related to project setup, usage instructions, change logs, or contributor guidelines.
                   - Note updates in the documentation and the implications for users or developers.
                """,
            },
            {
                "role": "user",
                "content": source_text,
            }
        ],
        model=MODEL_NAME,
        stream=False,
    )
    return chat_completion.choices[0].message.content

def qgen(source_text):
    client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "Respond with a list of 10 questions. The text in the user message must contain specific answers to each question. Each question must be on its own line. Just list the questions without any introductory text or numbers.",
            },
            {
                "role": "user",
                "content": source_text,
            }
        ],
        model=MODEL_NAME,
        stream=False,
    )
    return chat_completion.choices[0].message.content

def agen(source_text, question):
    client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "Give a comprehensive and well-reasoned answer to the user question strictly based on the context below and try to give a detailed explanation while answering the questions. Also try to add some bonus tip in each answer and some relevant example outside of the content.\n" + source_text},
            {
                "role": "user",
                "content": question,
            }
        ],
        model=MODEL_NAME,
        stream=False,
    )
    return chat_completion.choices[0].message.content

def main():
    input_path = r"C:\Users\91745\OneDrive\Desktop\Github_analyser\output\main_repos\2.llamaedge_all.csv"
    output_path = "test_old.csv"
    processed_contents = set()
    output_file_exists = os.path.exists(output_path)
    if output_file_exists:
        with open(output_path, 'r', newline='', encoding='utf-8') as csvfile:
            csv_reader = csv.DictReader(csvfile)
            for row in csv_reader:
                processed_contents.add(row['Content'])

    row_count = 0

    with open(input_path, 'r', newline='', encoding='utf-8') as csvfile_in, \
         open(output_path, 'a', newline='', encoding='utf-8') as csvfile_out:

        csv_reader = csv.DictReader(csvfile_in)
        fieldnames = ["Content", "Summary and Q&A"]
        writer = csv.DictWriter(csvfile_out, fieldnames=fieldnames)

        if not output_file_exists:
            writer.writeheader()

        for row in csv_reader:
            main_content = row['Content']
            if main_content in processed_contents:
                continue

            if len(main_content.split()) > 5000:
                continue
            summary = summarize(main_content)
            qs = qgen(main_content)
            qna_list = []
            for q in qs.splitlines():
                if len(q.strip()) == 0:
                    continue
                answer = agen(main_content, q)
                qna_list.append(f"Q: {q}\nA: {answer}")

            writer.writerow({"Content": main_content, "Summary and Q&A": f"Summary:\n{summary}"})
            for qna in qna_list:
                writer.writerow({"Content": main_content, "Summary and Q&A": qna})
            processed_contents.add(main_content)

            row_count += 1
            print(f"Processed row {row_count}")

    print(f"Modified data has been written to {output_path}")
    print(f"Total rows summarized: {row_count}")

if __name__ == "__main__":
    main()
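In this older variant, any row whose Content exceeds 5,000 words is skipped outright. A hedged alternative is to chunk long content and summarize the pieces; the sketch below assumes the summarize() defined in summarizer_old.py is in scope, and the chunk size is an illustrative choice, not part of the uploaded script.

# Hypothetical chunking fallback for the `> 5000 words -> continue` skip above.
def chunk_words(text, max_words=4000):
    # Yield word-bounded slices of the text, each at most max_words long.
    words = text.split()
    for start in range(0, len(words), max_words):
        yield " ".join(words[start:start + max_words])

def summarize_long(source_text, max_words=4000):
    # Short inputs go straight through; long ones are summarized per chunk,
    # then the partial summaries are condensed with one more summarize() call.
    if len(source_text.split()) <= max_words:
        return summarize(source_text)
    partial = [summarize(chunk) for chunk in chunk_words(source_text, max_words)]
    return summarize("\n\n".join(partial))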
scripts/Summary/summary_only.py
ADDED
@@ -0,0 +1,56 @@
import pandas as pd
import openai
import logging
import time

logging.basicConfig(level=logging.INFO)

API_BASE_URL = "https://llama.us.gaianet.network/v1"
MODEL_NAME = "llama"
API_KEY = "GAIA"

client = openai.OpenAI(base_url=API_BASE_URL, api_key=API_KEY)

def summarize_code(code, path):
    try:
        start_time = time.time()
        response = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert software engineer. Your task is to analyze the provided code and generate a concise, coherent summary that captures the purpose, functionality, and key components of the code. Additionally, highlight any potential issues or areas for improvement."
                },
                {
                    "role": "user",
                    "content": f"Code from {path}:\n\n{code}",
                }
            ],
            model=MODEL_NAME,
            stream=False,
        )
        logging.info(f"API call took {time.time() - start_time} seconds.")
        return response.choices[0].message.content
    except Exception as e:
        logging.error(f"Error in summarizing code: {e}")
        return "Error: Could not summarize"


def summarize_csv_content(input_csv_file, output_csv_file):
    try:
        df = pd.read_csv(input_csv_file)
        if 'Content' not in df.columns or 'Path' not in df.columns:
            raise ValueError("'Content' or 'Path' column not found in the input CSV file.")

        logging.info("Starting summarization...")
        df['Summary'] = df.apply(lambda row: summarize_code(row['Content'], row['Path']) if pd.notnull(row['Content']) else "", axis=1)

        df.to_csv(output_csv_file, index=False)
        logging.info(f"Summaries have been generated and saved to {output_csv_file}")
    except Exception as e:
        logging.error(f"Error processing CSV: {e}")

if __name__ == "__main__":
    input_csv_file = '/home/aru/Desktop/Github_analyser/Output/main_repos/wasmedge_shorten.csv'
    output_csv_file = '/home/aru/Desktop/Github_analyser/Output/summary/wasmedge_summary.csv'

    summarize_csv_content(input_csv_file, output_csv_file)
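Because summarize_code() swallows every exception and returns a fixed placeholder string, a transient API outage silently turns rows into "Error: Could not summarize". One hedged way to harden this is a small retry wrapper around the client.chat.completions.create call; the helper below and its retry parameters are illustrative, not part of the uploaded script.

import time
import logging

# Hypothetical retry helper: call fn with exponential backoff before giving up.
def with_retries(fn, *args, attempts=3, base_delay=2.0, **kwargs):
    for attempt in range(1, attempts + 1):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            logging.warning(f"Attempt {attempt}/{attempts} failed: {e}")
            if attempt == attempts:
                raise
            time.sleep(base_delay * (2 ** (attempt - 1)))

# Illustrative use inside summarize_code (keyword arguments as in the call above):
#   response = with_retries(client.chat.completions.create,
#                           messages=messages, model=MODEL_NAME, stream=False)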
scripts/readme.md
ADDED
@@ -0,0 +1,3 @@
1. The first step is to list the various file types inside the repo.
2. The next step is to fetch data from these files using Python, with OCR methods for images and PDFs.
3. The third step is to pre-process this data (if needed) and generate summaries or questions and answers from it.
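For readers who want to run these stages end to end, a minimal driver is sketched below. It covers only the scripts shown in this upload, assumes it is run from the repository root, and assumes the fetch step has already produced the input CSV; the sequencing itself is an illustration, since each script is normally run standalone.

import subprocess
import sys

# Illustrative driver: summarize the previously fetched CSV, then upload the results.
for script in ("scripts/Summary/summary_only.py", "scripts/upload.py"):
    subprocess.run([sys.executable, script], check=True)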
scripts/upload.py
ADDED
@@ -0,0 +1,8 @@
from huggingface_hub import HfApi
api = HfApi()

api.upload_folder(
    folder_path=r"C:\Users\91745\OneDrive\Desktop\Github_analyser",
    repo_id="FiendHunter/Github_bot_scripts",
    repo_type="dataset",
)
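upload_folder pushes everything under folder_path, including any intermediate CSVs. If that is undesirable, huggingface_hub's ignore_patterns argument can exclude files; the patterns below are illustrative additions, not part of the original script.

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path=r"C:\Users\91745\OneDrive\Desktop\Github_analyser",
    repo_id="FiendHunter/Github_bot_scripts",
    repo_type="dataset",
    ignore_patterns=["*.csv", "__pycache__/*"],  # illustrative: skip bulky intermediates
)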