# Import necessary libraries
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
import pandas as pd
from sklearn.model_selection import train_test_split

# Load the tabular data as a DataFrame
df = pd.read_csv('diabetes_data.csv')  # Replace with the path to your CSV

# Binarize the target for two-class classification.
# threshold_value is a placeholder: set it to the cutoff that defines the positive class.
threshold_value = 0.5  # placeholder -- choose a threshold appropriate for your target column
df['label'] = (df['target_column'] > threshold_value).astype(int)

# Split the dataset (fixed seed for reproducibility)
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)
train_df.to_csv("train.csv", index=False)
test_df.to_csv("test.csv", index=False)

# Load the CSVs with Hugging Face Datasets
dataset = load_dataset('csv', data_files={'train': 'train.csv', 'test': 'test.csv'})

# Load tokenizer and model (two labels for binary classification)
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)

# Tokenize the dataset. 'text_column' is a placeholder: BERT expects text input,
# so point this at whichever column holds the text to classify.
def preprocess_function(examples):
    return tokenizer(examples['text_column'], padding="max_length", truncation=True)

tokenized_dataset = dataset.map(preprocess_function, batched=True)

# Set training arguments
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
)

# Initialize Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset['train'],
    eval_dataset=tokenized_dataset['test'],
)

# Train and evaluate
trainer.train()
trainer.evaluate()
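
# ---------------------------------------------------------------------------
# Optional additions below are sketches, not part of the pipeline above.
# ---------------------------------------------------------------------------

# A minimal accuracy metric, assuming you want a score reported at each epoch.
# Trainer accepts a user-supplied compute_metrics callback; the function name
# and the accuracy choice here are illustrative, nothing dataset-specific.
import numpy as np

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return {"accuracy": float((predictions == labels).mean())}

# To use it, pass compute_metrics=compute_metrics when constructing Trainer.

# A quick inference sketch after training. sample_text is a hypothetical
# example; the 0/1 labels follow the binarization applied to target_column above.
import torch

sample_text = "example input text to classify"  # hypothetical input
inputs = tokenizer(sample_text, return_tensors="pt", truncation=True, padding=True)
inputs = {k: v.to(model.device) for k, v in inputs.items()}  # match the model's device
with torch.no_grad():
    logits = model(**inputs).logits
predicted_class = int(logits.argmax(dim=-1))
print(predicted_class)  # 0 or 1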