Josephgflowers committed
Commit: 9816cdd
Parent(s): 48952c1

Upload 2 files

Browse files
- .gitattributes +1 -0
- finance_train_204181.csv +3 -0
- find-fin-fine.py +137 -0
.gitattributes CHANGED

@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+finance_train_204181.csv filter=lfs diff=lfs merge=lfs -text
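Note: the added rule makes Git LFS store finance_train_204181.csv as a pointer file rather than committing the multi-gigabyte CSV directly. As a rough, illustrative check (not part of the commit; fnmatch only approximates gitattributes glob semantics, e.g. for '**'), a path can be tested against the LFS rules like this:

from fnmatch import fnmatch

# Hypothetical helper: does `path` match any filter=lfs rule in .gitattributes?
def lfs_tracked(path, gitattributes='.gitattributes'):
    with open(gitattributes) as f:
        for line in f:
            parts = line.split()
            if len(parts) > 1 and 'filter=lfs' in parts[1:] and fnmatch(path, parts[0]):
                return True
    return False

print(lfs_tracked('finance_train_204181.csv'))  # True once this rule is in place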
finance_train_204181.csv ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5f039d1d5b0423de2c7c85c38b5ba2192665941e75f192e1156fdfc6c75cc51
+size 6506281583
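Note: the three added lines are a Git LFS pointer, not the data itself: the spec version, the SHA-256 object id, and the payload size in bytes (6506281583, roughly 6.5 GB). A minimal, illustrative reader for such a pointer file (the helper name is hypothetical, not from this repo):

# Illustrative sketch: read a Git LFS pointer file into a dict of its fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    return fields

ptr = parse_lfs_pointer('finance_train_204181.csv')
print(ptr['oid'])        # 'sha256:b5f039d1...'
print(int(ptr['size']))  # 6506281583 (about 6.5 GB)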
find-fin-fine.py ADDED

@@ -0,0 +1,137 @@
import pandas as pd
import re
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import os
import glob
# Expanded financial keywords (formatted for regex word boundaries)
financial_keywords = [
    r"\bfinance\b", r"\bfinancial\b", r"\beconomy\b", r"\beconomic\b", r"\bmarket\b", r"\bstock\b", r"\bbond\b",
    r"\bshare\b", r"\basset\b", r"\bportfolio\b", r"\binvestment\b", r"\binvestor\b", r"\btrading\b", r"\bbroker\b",
    r"\bcommodity\b", r"\bcurrency\b", r"\bforeign exchange\b", r"\bforex\b", r"\bderivative\b", r"\boption\b",
    r"\bfutures\b", r"\bhedging\b", r"\brisk\b", r"\bdividend\b", r"\binterest\b", r"\bliquidity\b", r"\bcredit\b",
    r"\bdebt\b", r"\bcapital\b", r"\bfund\b", r"\bventure\b", r"\bvaluation\b", r"\bmerger\b", r"\bacquisition\b",
    r"\bIPO\b", r"\binitial public offering\b", r"\bprivate equity\b", r"\bhedge fund\b", r"\bmutual fund\b",
    r"\bETF\b", r"\bexchange-traded fund\b", r"\bfinancial statement\b", r"\bbalance sheet\b", r"\bincome statement\b",
    r"\bcash flow\b", r"\brevenue\b", r"\bprofit\b", r"\bloss\b", r"\bexpense\b", r"\bbudget\b", r"\bforecast\b",
    r"\banalysis\b", r"\bearnings\b", r"\bEBITDA\b", r"\bEPS\b", r"\bP/E ratio\b", r"\bprice to earnings\b",
    r"\bROI\b", r"\breturn on investment\b", r"\bROE\b", r"\breturn on equity\b", r"\bdiversification\b",
    r"\bNASDAQ\b", r"\bNYSE\b", r"\bS&P 500\b", r"\bDow Jones\b", r"\bFTSE\b", r"\bNikkei\b", r"\bcommodities\b",
    r"\bgold\b", r"\bsilver\b", r"\boil\b", r"\bGDP\b", r"\bgross domestic product\b", r"\binflation\b",
    r"\bunemployment\b", r"\binterest rate\b", r"\bfederal reserve\b", r"\bcentral bank\b", r"\bmonetary policy\b",
    r"\bquantitative easing\b", r"\bfiscal policy\b", r"\btax\b", r"\btreasury\b", r"\bbudget deficit\b",
    r"\bnational debt\b", r"\bcredit rating\b", r"\bstandard & poor's\b", r"\bmoody's\b", r"\bfitch\b",
    r"\bsovereign wealth fund\b", r"\binternational monetary fund\b", r"\bIMF\b", r"\bworld bank\b",
    r"\bbasel III\b", r"\bdodd-frank\b", r"\bfinancial regulation\b", r"\binsurance\b", r"\breal estate\b",
    r"\bmortgage\b", r"\bloan\b", r"\bbank\b", r"\bbanking\b", r"\bfintech\b", r"\bblockchain\b", r"\bcryptocurrency\b",
    r"\bbitcoin\b", r"\bethereum\b", r"\bsmart contract\b", r"\bdigital currency\b", r"\bdecentralized finance\b",
    # Add more general financial terms as needed
]

# Combine financial keywords into a single regex pattern using non-capturing groups
financial_regex = r'(?:' + r'|'.join(financial_keywords) + r')'
# Function to process a chunk of the dataset
def process_chunk(chunk):
    # Use vectorized string operations for efficiency
    # Count the number of matches in each column
    score_counts = chunk['score'].astype(str).str.count(financial_regex, flags=re.IGNORECASE)
    url_counts = chunk['url'].astype(str).str.count(financial_regex, flags=re.IGNORECASE)
    text_counts = chunk['text'].astype(str).str.count(financial_regex, flags=re.IGNORECASE)

    # Handle NaN values by filling them with zero
    score_counts = score_counts.fillna(0)
    url_counts = url_counts.fillna(0)
    text_counts = text_counts.fillna(0)

    # Sum the counts to get the financial score
    match_counts = score_counts + url_counts + text_counts
    match_counts = match_counts.astype(int)

    # Set a threshold for the minimum financial score
    threshold = 14  # Adjust this value as needed

    # Filter rows that meet the threshold
    filtered_chunk = chunk[match_counts >= threshold].copy()
    filtered_chunk['financial_score'] = match_counts[match_counts >= threshold]

    # Replace the original 'score' with 'financial_score'
    filtered_chunk['score'] = filtered_chunk['financial_score']
    filtered_chunk = filtered_chunk.drop(columns=['financial_score'])

    return filtered_chunk
# Function to process a single CSV file
def process_file(input_file, output_file):
    # Read the CSV file in chunks
    chunk_size = 10000  # Adjust this value based on your memory constraints
    reader = pd.read_csv(input_file, chunksize=chunk_size)

    # Prepare the output file
    first_chunk = True

    # Number of worker processes
    num_workers = 8  # Adjust based on your CPU cores

    # Batch size for chunks to process in parallel
    batch_size = num_workers * 4  # Adjust based on memory constraints

    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        futures = []
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                # Process batch of chunks in parallel
                futures = [executor.submit(process_chunk, c) for c in chunk_list]
                for future in tqdm(futures, desc='Processing batch', leave=False):
                    filtered_chunk = future.result()
                    if not filtered_chunk.empty:
                        if first_chunk:
                            filtered_chunk.to_csv(output_file, mode='w', index=False)
                            first_chunk = False
                        else:
                            filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
                chunk_list = []
        # Process any remaining chunks
        if chunk_list:
            futures = [executor.submit(process_chunk, c) for c in chunk_list]
            for future in tqdm(futures, desc='Processing last batch', leave=False):
                filtered_chunk = future.result()
                if not filtered_chunk.empty:
                    if first_chunk:
                        filtered_chunk.to_csv(output_file, mode='w', index=False)
                        first_chunk = False
                    else:
                        filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
    print(f'Finished processing {input_file}')
# List of directories to process
data_dir = '/media/joe/512-3/csv'
years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]  # Adjust years as needed
directories = [os.path.join(data_dir, year) for year in years]

# Process each CSV file in each directory
for dir_path in directories:
    if not os.path.isdir(dir_path):
        print(f'Directory not found: {dir_path}')
        continue
    csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
    print(f'Found {len(csv_files)} CSV files in {dir_path}')
    for input_file in csv_files:
        # Construct output file name by prefixing the base name with 'fin_'
        # Example: if input_file is '/path/CC-MAIN-2013/train-00001-of-00001.csv',
        # the output file will be '/path/CC-MAIN-2013/fin_train-00001-of-00001.csv'
        base_name = os.path.basename(input_file)

        # Skip outputs of a previous run, which the glob above also picks up
        if base_name.startswith('fin_'):
            continue

        output_file = os.path.join(dir_path, 'fin_' + base_name)

        # Check if output file already exists to avoid reprocessing
        if os.path.exists(output_file):
            print(f'Output file already exists. Skipping: {output_file}')
            continue

        process_file(input_file, output_file)
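Note: as a quick sanity check of the scoring logic above, the sketch below applies the same vectorized counting and threshold idea to a tiny in-memory DataFrame. The sample rows, the reduced keyword subset, and the lower threshold are illustrative only; the script itself uses the full keyword list with threshold = 14.

import re
import pandas as pd

# Illustrative subset of the keyword pattern built in find-fin-fine.py.
demo_regex = r'(?:\bstock\b|\bbond\b|\binflation\b|\bdividend\b)'

df = pd.DataFrame({
    'url': ['https://example.com/markets', 'https://example.com/recipes'],
    'text': ['Stock and bond yields rose as inflation cooled.',
             'Preheat the oven and whisk the eggs.'],
    'score': ['0.8', '0.9'],
})

# Same per-column counting as process_chunk, summed into one score.
counts = sum(
    df[col].astype(str).str.count(demo_regex, flags=re.IGNORECASE).fillna(0)
    for col in ('score', 'url', 'text')
).astype(int)

threshold = 2  # Illustrative; keeps only rows with at least two keyword hits.
print(df[counts >= threshold])  # Only the markets row survives.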