File size: 9,669 Bytes
0872027
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
import pandas as pd
import re
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import os
import glob

 # Science keywords (formatted for regex word boundaries)
science_keywords_list = [
    # Physics subfields
    "classical mechanics", "quantum mechanics", "thermodynamics", "statistical mechanics",
    "electromagnetism", "optics", "acoustics", "relativity",
    "particle physics", "nuclear physics", "atomic physics", "molecular physics",
    "condensed matter physics", "solid state physics", "fluid dynamics", "plasma physics",
    "astrophysics", "cosmology", "gravitational physics", "space physics",
    "geophysics", "biophysics", "chemical physics", "material science",
    "energy physics", "environmental physics", "medical physics", "computational physics",
    "high energy physics", "theoretical physics", "experimental physics", "quantum field theory",
    "quantum optics", "quantum computing", "nanophysics", "nanotechnology",
    "electrodynamics", "magnetohydrodynamics", "photovoltaics", "superconductivity",
    "non-linear dynamics", "chaos theory", "string theory", "loop quantum gravity",

    # Astronomy and space exploration
    "astronomy", "astrophysics", "cosmology", "space exploration", "exoplanets",
    "spacecraft", "space shuttle", "rocket science", "satellites", "International Space Station",
    "Hubble Space Telescope", "Mars rovers", "moon landing", "lunar mission", "orbital mechanics",
    "zero gravity", "microgravity", "space colonization", "astrobiology", "planetology",
    "dark matter", "dark energy", "black holes", "neutron stars", "pulsars",
    "space probe", "deep space", "interstellar travel", "galaxies", "nebulae",
    "stellar evolution", "solar system", "comets", "asteroids", "meteorites",
    "space weather", "cosmic radiation", "extraterrestrial life", "SETI", "alien planets",
    "space suit", "spacewalk", "gravity assist", "launch vehicle", "reusable rockets",
    "space tourism", "private spaceflight", "space elevator", "falcon rocket", "starship", "NASA",

    # Keywords for chemistry and physics
    "chemistry lab", "physics lab", "science lab", "laboratory write-up",
    "experimental write-up", "lab report", "experiment procedure",
    "materials and methods", "scientific method", "chemical reaction",
    "titration", "stoichiometry", "molecular structure", "acid-base experiment",
    "organic chemistry lab", "Newton's laws experiment", "electric circuits lab",
    "magnetism experiment", "thermodynamics lab", "momentum lab",
    "experiment", "science project", "visual perception", "color and light", "optics experiment",
    "color spectrum", "materials list", "step-by-step instructions", "procedure",
    "instructions", "kids experiment", "learn about colors", "exploration of light",
    "hands-on science", "STEM activity",
    "learning through experimentation", "visual effects",
    "spin speed", "color change",

    # Math keywords
    "addition", "subtraction", "multiplication",
    "fraction", "decimal", "percentage", "ratio", "proportion",
    "equation", "coefficient",
    "polynomial", "quadratic", "exponential", "logarithm", "factorial",
    "sum", "quotient",
    "median",
    "data", "analysis",
    "graph", "function", "linear",
    "nonlinear", "slope", "intercept", "coordinate",
    "geometry", "angle", "triangle", "rectangle", "circle",
    "polygon", "perimeter",
    "circumference", "diameter", "radius", "pythagorean",
    "theorem", "trigonometry", "sine", "cosine", "tangent",
    "calculus",
    "sequence", "convergence",
    "vector", "algebra", "arithmetic",
    "computation", "measurement",

    # Logic, reasoning, problem-solving keywords
    "deductive reasoning", "inductive reasoning", "abductive reasoning", "logical fallacy",
    "syllogism", "proposition", "premise", "conclusion",
    "argument", "critical thinking", "analytical skills", "hypothesis testing",
    "problem analysis", "brainstorming", "decision making", "creative thinking",
    "heuristic", "algorithm", "data analysis", "causal reasoning",
    "correlation", "evidence-based reasoning", "validity", "soundness",
    "cognitive bias", "confirmation bias", "cognitive dissonance", "logical consistency",
    "counterargument", "debate", "dialectic", "socratic questioning",
    "root cause analysis", "SWOT analysis", "decision tree", "flow chart",
    "mind mapping", "ideation", "brainwriting", "lateral thinking",
    "problem decomposition", "synthesis", "pattern recognition", "inference",
    "troubleshooting", "risk assessment", "scenario planning", "cost-benefit analysis",
    "optimization", "simulation", "strategic planning", "logical operator",
]
    

# Escape special regex characters and add word boundaries
science_keywords = [
    r"\b" + re.escape(keyword).replace(r'\ ', ' ') + r"\b" for keyword in science_keywords_list
]

# Combine science keywords into a single regex pattern using non-capturing groups
science_regex = r'(?:' + r'|'.join(science_keywords) + r')'

# Function to process a chunk of the dataset
def process_chunk(chunk, pattern=None):
    """Score a chunk of rows by keyword matches and keep the high scorers.

    Parameters
    ----------
    chunk : pandas.DataFrame
        Three columns read headerless from CSV; renamed in place to
        ['score', 'text', 'url'] if not already named that way.
    pattern : str, optional
        Regex used to count keyword hits (compiled case-insensitively).
        Defaults to the module-level ``science_regex``.

    Returns
    -------
    pandas.DataFrame
        Copy of the rows whose combined match count across all three
        columns meets the threshold, with 'score' replaced by that count.
    """
    # Compile once per chunk instead of once per column per call.
    regex = re.compile(science_regex if pattern is None else pattern,
                       re.IGNORECASE)

    # Assign column names if they are not already set (chunks arrive from
    # pd.read_csv(..., header=None) with integer column labels 0/1/2).
    if list(chunk.columns) != ['score', 'text', 'url']:
        chunk.columns = ['score', 'text', 'url']

    # Vectorized match counting per column; NaN counts are treated as zero.
    # NOTE(review): counting keyword hits in the numeric 'score' column
    # looks odd but is preserved from the original behavior.
    match_counts = sum(
        chunk[col].astype(str).str.count(regex).fillna(0)
        for col in ('score', 'text', 'url')
    ).astype(int)

    # Minimum combined keyword hits for a row to be kept.
    threshold = 50  # Adjust this value as needed

    # Keep qualifying rows and overwrite 'score' with the match count
    # directly (the original round-tripped through a temporary
    # 'science_score' column that was immediately copied and dropped).
    filtered_chunk = chunk[match_counts >= threshold].copy()
    filtered_chunk['score'] = match_counts[match_counts >= threshold]
    return filtered_chunk

# Function to process a single CSV file
def process_file(input_file, output_file, chunk_size=10000, num_workers=20):
    """Filter one headerless CSV through process_chunk in parallel batches.

    Reads ``input_file`` in chunks, scores each chunk in a process pool,
    and appends the surviving rows to ``output_file`` (no header, no index).

    Parameters
    ----------
    input_file : str
        Path to a headerless 3-column CSV (score, text, url).
    output_file : str
        Path the filtered rows are written to; overwritten on first write.
    chunk_size : int, optional
        Rows per pandas chunk; tune for memory constraints.
    num_workers : int, optional
        Worker processes; tune for available CPU cores.
    """
    reader = pd.read_csv(input_file, chunksize=chunk_size, header=None)

    # Number of chunks queued before a parallel flush; bounds peak memory.
    batch_size = num_workers * 4  # Adjust based on memory constraints

    # True until the first non-empty result is written (mode 'w' vs 'a').
    first_chunk = True

    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        def flush(batch, desc):
            # Score one batch in parallel and append survivors to output.
            # (Shared by the main loop and the final partial batch, which
            # the original duplicated verbatim.)
            nonlocal first_chunk
            futures = [executor.submit(process_chunk, c) for c in batch]
            for future in tqdm(futures, desc=desc, leave=False):
                filtered_chunk = future.result()
                if filtered_chunk.empty:
                    continue
                mode = 'w' if first_chunk else 'a'
                filtered_chunk.to_csv(output_file, mode=mode, index=False, header=False)
                first_chunk = False

        chunk_list = []
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                flush(chunk_list, 'Processing batch')
                chunk_list = []
        # Process any remaining chunks
        if chunk_list:
            flush(chunk_list, 'Processing last batch')
    print(f'Finished processing {input_file}')

# List of directories to process
data_dir = '/media/joe/512-3/csv'
years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]  # Adjust years as needed
directories = [os.path.join(data_dir, year) for year in years]

# Process each CSV file in each directory
for dir_path in directories:
    if not os.path.isdir(dir_path):
        print(f'Directory not found: {dir_path}')
        continue
    csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
    print(f'Found {len(csv_files)} CSV files in {dir_path}')
    for input_file in csv_files:
        base_name = os.path.basename(input_file)

        # BUG FIX: outputs are written into the same directory that is
        # globbed above, so on a later run previously produced 'math_*'
        # files would be picked up and filtered again (creating
        # 'math_math_*' outputs). Never treat an output file as input.
        if base_name.startswith('math_'):
            continue

        # Construct output file name
        output_file = os.path.join(
            dir_path, 'math_' + base_name
        )

        # Check if output file already exists to avoid reprocessing
        if os.path.exists(output_file):
            print(f'Output file already exists. Skipping: {output_file}')
            continue

        process_file(input_file, output_file)