import glob
import os
import re
from concurrent.futures import ProcessPoolExecutor

import pandas as pd
from tqdm import tqdm

# Science keywords (formatted for regex word boundaries)
science_keywords_list = [
    # Physics subfields
    "classical mechanics", "quantum mechanics", "thermodynamics",
    "statistical mechanics", "electromagnetism", "optics", "acoustics",
    "relativity", "particle physics", "nuclear physics", "atomic physics",
    "molecular physics", "condensed matter physics", "solid state physics",
    "fluid dynamics", "plasma physics", "astrophysics", "cosmology",
    "gravitational physics", "space physics", "geophysics", "biophysics",
    "chemical physics", "material science", "energy physics",
    "environmental physics", "medical physics", "computational physics",
    "high energy physics", "theoretical physics", "experimental physics",
    "quantum field theory", "quantum optics", "quantum computing",
    "nanophysics", "nanotechnology", "electrodynamics",
    "magnetohydrodynamics", "photovoltaics", "superconductivity",
    "non-linear dynamics", "chaos theory", "string theory",
    "loop quantum gravity",
    # Astronomy and space exploration
    "astronomy", "astrophysics", "cosmology", "space exploration",
    "exoplanets", "spacecraft", "space shuttle", "rocket science",
    "satellites", "International Space Station", "Hubble Space Telescope",
    "Mars rovers", "moon landing", "lunar mission", "orbital mechanics",
    "zero gravity", "microgravity", "space colonization", "astrobiology",
    "planetology", "dark matter", "dark energy", "black holes",
    "neutron stars", "pulsars", "space probe", "deep space",
    "interstellar travel", "galaxies", "nebulae", "stellar evolution",
    "solar system", "comets", "asteroids", "meteorites", "space weather",
    "cosmic radiation", "extraterrestrial life", "SETI", "alien planets",
    "space suit", "spacewalk", "gravity assist", "launch vehicle",
    "reusable rockets", "space tourism", "private spaceflight",
    "space elevator", "falcon rocket", "starship", "NASA",
    # Keywords for chemistry and physics
    "chemistry lab", "physics lab", "science lab", "laboratory write-up",
    "experimental write-up", "lab report", "experiment procedure",
    "materials and methods", "scientific method", "chemical reaction",
    "titration", "stoichiometry", "molecular structure",
    "acid-base experiment", "organic chemistry lab",
    "Newton's laws experiment", "electric circuits lab",
    "magnetism experiment", "thermodynamics lab", "momentum lab",
    "experiment", "science project", "visual perception", "color and light",
    "optics experiment", "color spectrum", "materials list",
    "step-by-step instructions", "procedure", "instructions",
    "kids experiment", "learn about colors", "exploration of light",
    "hands-on science", "STEM activity", "learning through experimentation",
    "visual effects", "spin speed", "color change",
    # Math keywords
    "addition", "subtraction", "multiplication", "fraction", "decimal",
    "percentage", "ratio", "proportion", "equation", "coefficient",
    "polynomial", "quadratic", "exponential", "logarithm", "factorial",
    "sum", "quotient", "median", "data", "analysis", "graph", "function",
    "linear", "nonlinear", "slope", "intercept", "coordinate", "geometry",
    "angle", "triangle", "rectangle", "circle", "polygon", "perimeter",
    "circumference", "diameter", "radius", "pythagorean", "theorem",
    "trigonometry", "sine", "cosine", "tangent", "calculus", "sequence",
    "convergence", "vector", "algebra", "arithmetic", "computation",
    "measurement",
    # Logic, reasoning, problem-solving keywords
    "deductive reasoning", "inductive reasoning", "abductive reasoning",
    "logical fallacy", "syllogism", "proposition", "premise", "conclusion",
    "argument", "critical thinking", "analytical skills",
    "hypothesis testing", "problem analysis", "brainstorming",
    "decision making", "creative thinking", "heuristic", "algorithm",
    "data analysis", "causal reasoning", "correlation",
    "evidence-based reasoning", "validity", "soundness", "cognitive bias",
    "confirmation bias", "cognitive dissonance", "logical consistency",
    "counterargument", "debate", "dialectic", "socratic questioning",
    "root cause analysis", "SWOT analysis", "decision tree", "flow chart",
    "mind mapping", "ideation", "brainwriting", "lateral thinking",
    "problem decomposition", "synthesis", "pattern recognition",
    "inference", "troubleshooting", "risk assessment", "scenario planning",
    "cost-benefit analysis", "optimization", "simulation",
    "strategic planning", "logical operator",
]

# Escape regex metacharacters and wrap each phrase in word boundaries.
# re.escape also escapes spaces, so the replace() undoes that for multi-word
# phrases (an escaped space matches identically; this just keeps the pattern
# readable).
science_keywords = [
    r"\b" + re.escape(keyword).replace(r'\ ', ' ') + r"\b"
    for keyword in science_keywords_list
]

# Combine the science keywords into a single regex alternation,
# using a non-capturing group
science_regex = r'(?:' + r'|'.join(science_keywords) + r')'
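# Sanity check (illustrative): a quick way to confirm the combined pattern
# matches multi-word phrases case-insensitively before committing to a long
# run. The sample sentence is made up for this check and contains exactly two
# keywords ("Quantum Mechanics" and "calculus").
_sample = 'An overview of Quantum Mechanics with a bit of calculus.'
assert len(re.findall(science_regex, _sample, flags=re.IGNORECASE)) == 2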
# Function to process a chunk of the dataset
def process_chunk(chunk):
    # Assign column names if they are not already set
    if list(chunk.columns) != ['score', 'text', 'url']:
        chunk.columns = ['score', 'text', 'url']

    # Use vectorized string operations for efficiency:
    # count the number of keyword matches in each column
    score_counts = chunk['score'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    url_counts = chunk['url'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    text_counts = chunk['text'].astype(str).str.count(science_regex, flags=re.IGNORECASE)

    # Sum the counts (treating NaN as zero) to get the science score
    match_counts = (
        score_counts.fillna(0) + url_counts.fillna(0) + text_counts.fillna(0)
    ).astype(int)

    # Keep only rows whose science score meets the threshold
    threshold = 50  # Adjust this value as needed
    filtered_chunk = chunk[match_counts >= threshold].copy()

    # Replace the original 'score' with the science score
    filtered_chunk['score'] = match_counts[match_counts >= threshold]

    return filtered_chunk
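# Usage example (illustrative, not part of the pipeline): a one-row frame
# with two keyword hits ("quantum mechanics", "lab report") scores 2, well
# below the threshold of 50, so it is filtered out.
_demo = pd.DataFrame([[0, 'a quantum mechanics lab report', 'example.com']])
assert process_chunk(_demo).empty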
# Function to process a single CSV file
def process_file(input_file, output_file):
    # Read the CSV file in chunks, assuming no header row in the CSV file
    chunk_size = 10000  # Adjust this value based on your memory constraints
    reader = pd.read_csv(input_file, chunksize=chunk_size, header=None)

    # The first non-empty result creates the output file; later ones append
    first_chunk = True

    # Number of worker processes
    num_workers = 20  # Adjust based on your CPU cores

    # Number of chunks to process in parallel per batch
    batch_size = num_workers * 4  # Adjust based on memory constraints

    def write_results(futures):
        # Collect results in submission order and append them to the output
        nonlocal first_chunk
        for future in tqdm(futures, desc='Processing batch', leave=False):
            filtered_chunk = future.result()
            if not filtered_chunk.empty:
                mode = 'w' if first_chunk else 'a'
                filtered_chunk.to_csv(output_file, mode=mode, index=False, header=False)
                first_chunk = False

    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                # Process a full batch of chunks in parallel
                write_results([executor.submit(process_chunk, c) for c in chunk_list])
                chunk_list = []

        # Process any remaining chunks
        if chunk_list:
            write_results([executor.submit(process_chunk, c) for c in chunk_list])

    print(f'Finished processing {input_file}')


# List of directories to process
data_dir = '/media/joe/512-3/csv'
years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]  # Adjust years as needed
directories = [os.path.join(data_dir, year) for year in years]


def main():
    # Process each CSV file in each directory
    for dir_path in directories:
        if not os.path.isdir(dir_path):
            print(f'Directory not found: {dir_path}')
            continue

        csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
        print(f'Found {len(csv_files)} CSV files in {dir_path}')

        for input_file in csv_files:
            base_name = os.path.basename(input_file)

            # Skip files produced by a previous run: outputs land in the same
            # directory, so the glob above picks them up as inputs too
            if base_name.startswith('math_'):
                continue

            # Construct the output file name
            output_file = os.path.join(dir_path, 'math_' + base_name)

            # Check if the output file already exists to avoid reprocessing
            if os.path.exists(output_file):
                print(f'Output file already exists. Skipping: {output_file}')
                continue

            process_file(input_file, output_file)


# Guard the entry point so worker processes do not re-run the driver loop on
# platforms where ProcessPoolExecutor uses the 'spawn' start method
if __name__ == '__main__':
    main()
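# Optional post-run check (illustrative; assumes the 'math_' prefix used
# above): count the total number of filtered rows written for one crawl year.
# >>> files = glob.glob(os.path.join(data_dir, 'CC-MAIN-2015', 'math_*.csv'))
# >>> sum(len(pd.read_csv(f, header=None)) for f in files)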