grpathak22 committed
Commit
09d32b7
1 Parent(s): d9c3bf1

Update app.py

Files changed (1)
  1. app.py +59 -1
app.py CHANGED
@@ -1,5 +1,63 @@
  from transformers import AutoTokenizer, MT5ForConditionalGeneration
+ from transformers import T5Tokenizer
  import streamlit as st
+ import pandas as pd
+ from datasets import Dataset
+ import torch
+ from datasets import Dataset, DatasetDict
+ from transformers import Trainer, TrainingArguments
+
+ tokenizer = T5Tokenizer.from_pretrained('google/mt5-base')
  model = MT5ForConditionalGeneration.from_pretrained("google/mt5-base")
- st.write(model)
+ #st.write(model)
+
+ df = pd.read_csv('proverbs.csv')
+ df
+ dataset = Dataset.from_pandas(df)
+
+ def preprocess_function(examples):
+     inputs = examples['Proverb']
+     targets = examples['Meaning']
+     model_inputs = tokenizer(inputs, max_length=128, truncation=True, padding="max_length")
+     with tokenizer.as_target_tokenizer():
+         labels = tokenizer(targets, max_length=128, truncation=True, padding="max_length")
+     model_inputs["labels"] = labels["input_ids"]
+     return model_inputs
+
+ tokenized_dataset = dataset.map(preprocess_function, batched=True)
+
+ dataset_split = tokenized_dataset.train_test_split(test_size=0.2)
+
+ train_dataset = dataset_split['train']
+ test_dataset = dataset_split['test']
+
+ print(f"Training dataset size: {len(train_dataset)}")
+ print(f"Testing dataset size: {len(test_dataset)}")
+
+ training_args = TrainingArguments(
+     output_dir="./results",
+     evaluation_strategy="epoch",
+     learning_rate=2e-5,
+     per_device_train_batch_size=4,
+     per_device_eval_batch_size=4,
+     num_train_epochs=3,
+     weight_decay=0.01,
+     save_total_limit=2,
+     save_steps=500,
+ )
+
+ # Initialize Trainer
+ trainer = Trainer(
+     model=model,
+     args=training_args,
+     train_dataset=tokenized_dataset,
+     eval_dataset=tokenized_dataset,  # Typically you'd have a separate eval dataset
+ )
+
+ # Fine-tune the model
+ trainer.train()
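
The commit trains the model but never queries it from the Streamlit UI. A minimal inference sketch, not part of this commit, assuming the `tokenizer` and fine-tuned `model` above are in scope and that a hypothetical text input named `proverb` is added:

  # Hypothetical follow-up (not in the commit): query the fine-tuned model from Streamlit.
  proverb = st.text_input("Enter a proverb")  # assumed UI field, not in app.py
  if proverb:
      inputs = tokenizer(proverb, return_tensors="pt", truncation=True, max_length=128)
      output_ids = model.generate(**inputs, max_length=64)
      st.write(tokenizer.decode(output_ids[0], skip_special_tokens=True))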