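"""Aggregate corpus statistics from parquet shards.

Walks the set_1..set_10 directories under base_dir, accumulates token, word,
document, and license counts per language and per collection, derives
compression rates and average document lengths, then prints a readable
summary and writes JSON reports.
"""
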
import json
import os

import numpy as np
import pandas as pd
import pyarrow.parquet as pq


base_dir = "/lustre/fsn1/projects/rech/fmr/uft12cr/statistics_corpus_full"
set_dirs = [f"set_{i}" for i in range(1, 11)]
test = False
min_tokens_threshold = 100000

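# Running aggregates keyed by language or collection; the derived metrics
# (compression rates, average lengths) are filled in after all files are read.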
stats = {
    "total_tokens_by_language": {},
    "total_words_by_language": {},
    "total_tokens_by_collection": {},
    "total_words_by_collection": {},
    "doc_count_by_language": {},
    "doc_count_by_collection": {},
    "license_distribution": {},
    "compression_rate_by_language": {},
    "avg_doc_length_words_by_language": {},
    "avg_doc_length_tokens_by_language": {},
    "compression_rate_by_collection": {},
    "avg_doc_length_words_by_collection": {},
    "avg_doc_length_tokens_by_collection": {},
}

processed_files = 0
errors_count = 0


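# Accumulate per-collection and per-language counts from a single DataFrame.
# Documents from GitHub collections are re-labelled as "code" before grouping.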
def update_stats(df):
    required_columns = {"collection", "language", "token_count", "word_count", "license"}
    if not required_columns.issubset(df.columns):
        print("DataFrame is missing required columns.")
        return

    try:
        df.loc[df["collection"].notna() & df["collection"].str.contains("Github", case=False), "language"] = "code"

        for collection_name, col_df in df.groupby("collection"):
            stats["total_tokens_by_collection"].setdefault(collection_name, 0)
            stats["total_tokens_by_collection"][collection_name] += col_df["token_count"].sum()
            stats["total_words_by_collection"].setdefault(collection_name, 0)
            stats["total_words_by_collection"][collection_name] += col_df["word_count"].sum()
            stats["doc_count_by_collection"].setdefault(collection_name, 0)
            stats["doc_count_by_collection"][collection_name] += len(col_df)

            license_counts = col_df["license"].value_counts().to_dict()
            for license_type, count in license_counts.items():
                stats["license_distribution"].setdefault(collection_name, {}).setdefault(license_type, 0)
                stats["license_distribution"][collection_name][license_type] += count

        for language, lang_df in df.groupby("language"):
            stats["total_tokens_by_language"].setdefault(language, 0)
            stats["total_tokens_by_language"][language] += lang_df["token_count"].sum()
            stats["total_words_by_language"].setdefault(language, 0)
            stats["total_words_by_language"][language] += lang_df["word_count"].sum()
            stats["doc_count_by_language"].setdefault(language, 0)
            stats["doc_count_by_language"][language] += len(lang_df)

    except Exception as e:
        print(f"Error updating statistics: {e}")


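# Collect every parquet file across the set directories.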
all_parquet_files = []
for set_dir in set_dirs:
    set_path = os.path.join(base_dir, set_dir)
    if not os.path.isdir(set_path):
        print(f"Directory {set_path} not found, skipping...")
        continue
    try:
        parquet_files = [os.path.join(set_path, f) for f in os.listdir(set_path) if f.endswith(".parquet")]
        all_parquet_files.extend(parquet_files)
    except Exception as e:
        print(f"Error reading directory {set_path}: {e}")

if test:
    all_parquet_files = all_parquet_files[:1]

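# Read each parquet file into memory and fold it into the running aggregates.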
total_files = len(all_parquet_files)
for file_path in all_parquet_files:
    try:
        df = pq.read_table(file_path).to_pandas()
        update_stats(df)
        processed_files += 1
    except Exception as e:
        print(f"Error processing file {file_path}: {e}")
        errors_count += 1

    if processed_files % 10 == 0 or processed_files == total_files:
        print(f"Processed {processed_files}/{total_files} files with {errors_count} errors.")

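# Derived per-language metrics; languages below the token threshold are marked "N/A".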
for language in stats["total_tokens_by_language"]:
    try:
        total_tokens = stats["total_tokens_by_language"][language]
        total_words = stats["total_words_by_language"][language]
        doc_count = stats["doc_count_by_language"][language]

        if total_tokens >= min_tokens_threshold:
            stats["compression_rate_by_language"][language] = total_tokens / total_words if total_words > 0 else None
            stats["avg_doc_length_words_by_language"][language] = total_words / doc_count if doc_count > 0 else None
            stats["avg_doc_length_tokens_by_language"][language] = total_tokens / doc_count if doc_count > 0 else None
        else:
            stats["compression_rate_by_language"][language] = "N/A"
            stats["avg_doc_length_words_by_language"][language] = "N/A"
            stats["avg_doc_length_tokens_by_language"][language] = "N/A"
    except Exception as e:
        print(f"Error calculating stats for language {language}: {e}")

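# Derived per-collection metrics, with the same token threshold applied.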
for collection in stats["total_tokens_by_collection"]:
    try:
        total_tokens = stats["total_tokens_by_collection"][collection]
        total_words = stats["total_words_by_collection"][collection]
        doc_count = stats["doc_count_by_collection"][collection]

        if total_tokens >= min_tokens_threshold:
            stats["compression_rate_by_collection"][collection] = total_tokens / total_words if total_words > 0 else None
            stats["avg_doc_length_words_by_collection"][collection] = total_words / doc_count if doc_count > 0 else None
            stats["avg_doc_length_tokens_by_collection"][collection] = total_tokens / doc_count if doc_count > 0 else None
        else:
            stats["compression_rate_by_collection"][collection] = "N/A"
            stats["avg_doc_length_words_by_collection"][collection] = "N/A"
            stats["avg_doc_length_tokens_by_collection"][collection] = "N/A"
    except Exception as e:
        print(f"Error calculating stats for collection {collection}: {e}")


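# Convert numpy scalars (produced by the pandas sums) to native Python types
# so the stats dictionary can be serialized with json.dump.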
def convert_to_native_types(stats):
    def convert(value):
        if isinstance(value, (np.integer, np.floating)):
            return value.item()
        elif isinstance(value, dict):
            return {k: convert(v) for k, v in value.items()}
        return value

    return {k: convert(v) for k, v in stats.items()}


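# Build a human-readable summary, print it, and save it to stats_readable_output.txt.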
def print_stats(stats):
    output = []
    output.append("============ Corpus Statistics Overview ============")
    total_tokens = sum(stats["total_tokens_by_collection"].values())
    total_words = sum(stats["total_words_by_collection"].values())
    total_docs = sum(stats["doc_count_by_collection"].values())
    output.append(f"\nTotal Tokens in Corpus: {total_tokens:,}")
    output.append(f"Total Words in Corpus: {total_words:,}")
    output.append(f"Total Documents in Corpus: {total_docs:,}")

    output.append("\nTop 10 Collections by Total Tokens:")
    for collection, count in sorted(stats["total_tokens_by_collection"].items(), key=lambda x: x[1], reverse=True)[:10]:
        output.append(f" - {collection}: {count:,}")

    output.append("\nTop 10 Languages by Total Tokens:")
    for language, count in sorted(stats["total_tokens_by_language"].items(), key=lambda x: x[1], reverse=True)[:10]:
        output.append(f" - {language}: {count:,}")

    output.append("\nCompression Rate by Language (Top 10):")
    for language, rate in sorted(stats["compression_rate_by_language"].items(), key=lambda x: x[1] if isinstance(x[1], (int, float)) else 0, reverse=True)[:10]:
        output.append(f" - {language}: {rate:.2f}" if isinstance(rate, (int, float)) else f" - {language}: N/A")

    output.append("\nAverage Document Length (Words) by Language (Top 10):")
    for language, avg_len in sorted(stats["avg_doc_length_words_by_language"].items(), key=lambda x: x[1] if isinstance(x[1], (int, float)) else 0, reverse=True)[:10]:
        output.append(f" - {language}: {avg_len:.2f}" if isinstance(avg_len, (int, float)) else f" - {language}: N/A")

    output.append("\nLicense Distribution by Collection (Top 5):")
    for collection, licenses in sorted(stats["license_distribution"].items(), key=lambda x: sum(x[1].values()), reverse=True)[:5]:
        output.append(f" - {collection}:")
        for license_type, count in sorted(licenses.items(), key=lambda x: x[1], reverse=True):
            output.append(f"   * {license_type}: {count:,}")

    output.append("====================================================")
    print("\n".join(output))

    with open("stats_readable_output.txt", "w") as f:
        f.write("\n".join(output))


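# Emit the final reports: console/text summary plus per-language and per-collection JSON files.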
print_stats(stats)

stats_native = convert_to_native_types(stats)

with open("stats_by_language.json", "w") as f:
    json.dump({
        "total_tokens_by_language": stats_native["total_tokens_by_language"],
        "total_words_by_language": stats_native["total_words_by_language"],
        "doc_count_by_language": stats_native["doc_count_by_language"],
        "compression_rate_by_language": stats_native["compression_rate_by_language"],
        "avg_doc_length_words_by_language": stats_native["avg_doc_length_words_by_language"],
        "avg_doc_length_tokens_by_language": stats_native["avg_doc_length_tokens_by_language"],
    }, f, indent=4)

with open("stats_by_collection.json", "w") as f:
    json.dump({
        "total_tokens_by_collection": stats_native["total_tokens_by_collection"],
        "total_words_by_collection": stats_native["total_words_by_collection"],
        "doc_count_by_collection": stats_native["doc_count_by_collection"],
        "compression_rate_by_collection": stats_native["compression_rate_by_collection"],
        "avg_doc_length_words_by_collection": stats_native["avg_doc_length_words_by_collection"],
        "avg_doc_length_tokens_by_collection": stats_native["avg_doc_length_tokens_by_collection"],
        "license_distribution": stats_native["license_distribution"],
    }, f, indent=4)