import streamlit as st
from transformers import pipeline
from datasets import load_dataset

model_name = "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"


# Cache the pipeline so the model is not reloaded on every Streamlit rerun.
# Note: this is a 70B-parameter model; device_map="auto" lets transformers
# place it across available GPUs (requires the accelerate package).
@st.cache_resource
def load_pipeline():
    return pipeline("text-generation", model=model_name, device_map="auto")


# Cache the dataset so it is only downloaded once per session
@st.cache_resource
def load_legal_dataset():
    return load_dataset("refugee-law-lab/canadian-legal-data", "default", split="train")


pipe = load_pipeline()
ds = load_legal_dataset()

# Streamlit interface
st.title("Canadian Legal Text Generator")
st.write("Enter a prompt related to Canadian legal data and generate text using Llama-3.1.")

# Show a sample of the dataset
st.subheader("Sample Data from Canadian Legal Dataset:")
st.write(ds[:5])  # Display the first 5 rows of the dataset

# Prompt input
prompt = st.text_area("Enter your prompt:", placeholder="Type something...")

if st.button("Generate Response"):
    if prompt:
        # Generate text based on the prompt; max_new_tokens bounds only the
        # newly generated tokens (unlike max_length, which includes the prompt)
        with st.spinner("Generating response..."):
            generated_text = pipe(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)[0]["generated_text"]
        st.write("**Generated Text:**")
        st.write(generated_text)
    else:
        st.warning("Please enter a prompt to generate a response.")
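
# A minimal way to launch the app locally (assuming this file is saved as app.py;
# the filename is illustrative, not fixed by the script itself):
#   streamlit run app.py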