jhansi1 committed on
Commit
0e5e438
1 Parent(s): a78e387

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -18
app.py CHANGED
@@ -1,27 +1,31 @@
import gradio as gr
from transformers import pipeline
from datasets import load_dataset

# Initialize the text-generation pipeline once at import time.
model_name = "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
pipe = pipeline("text-generation", model=model_name)

# Load the dataset from the Hugging Face hub.
# NOTE(review): `dataset` is never read below — presumably kept for a
# planned retrieval/display feature; confirm before removing.
dataset = load_dataset("refugee-law-lab/canadian-legal-data", split="train")


def generate_response(prompt):
    """Generate a model continuation for *prompt*.

    Parameters
    ----------
    prompt : str
        The user's question or instruction.

    Returns
    -------
    str
        The pipeline's generated text (includes the prompt, as the
        text-generation pipeline returns the full sequence).
    """
    # max_new_tokens bounds only the *generated* tokens; the previous
    # max_length=100 also counted prompt tokens, so prompts near or over
    # 100 tokens could silently truncate or abort generation.
    result = pipe(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    return result[0]["generated_text"]


# Set up the Gradio web interface.
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question or prompt here..."),
    outputs="text",
    title="Canadian Legal Text Generator",
)

# Launch only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from transformers import pipeline
from datasets import load_dataset

MODEL_NAME = "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
DATASET_NAME = "refugee-law-lab/canadian-legal-data"


@st.cache_resource
def get_pipeline():
    """Build the text-generation pipeline once per server process.

    Streamlit re-executes the whole script on every user interaction;
    without caching, this (very large) model would be reloaded on each
    button click.
    """
    return pipeline("text-generation", model=MODEL_NAME)


@st.cache_data
def load_sample_rows(n=5):
    """Load the dataset and return its first *n* rows for display.

    Cached so the dataset is fetched once, not on every rerun.
    """
    ds = load_dataset(DATASET_NAME, "default", split="train")
    return ds[:n]


pipe = get_pipeline()

# Streamlit interface
st.title("Canadian Legal Text Generator")
st.write("Enter a prompt related to Canadian legal data and generate text using Llama-3.1.")

# Show dataset sample
st.subheader("Sample Data from Canadian Legal Dataset:")
st.write(load_sample_rows())  # first 5 rows of the dataset

# Prompt input
prompt = st.text_area("Enter your prompt:", placeholder="Type something...")

if st.button("Generate Response"):
    if prompt:
        # Generate text based on the prompt.
        with st.spinner("Generating response..."):
            # max_new_tokens bounds only the *generated* tokens; the
            # previous max_length=100 also counted prompt tokens and
            # could silently truncate generation for longer prompts.
            generated_text = pipe(
                prompt, max_new_tokens=100, do_sample=True, temperature=0.7
            )[0]["generated_text"]
        st.write("**Generated Text:**")
        st.write(generated_text)
    else:
        st.write("Please enter a prompt to generate a response.")