# streamlit_app.py
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the model and tokenizer once and cache them across reruns,
# so the 7B checkpoint is not reloaded on every interaction
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("meta-math/MetaMath-Mistral-7B")
    model = AutoModelForCausalLM.from_pretrained("meta-math/MetaMath-Mistral-7B")
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()

# Streamlit app layout
st.title("MetaMath Mistral 7B Question-Answering")
st.write("Ask any question, and the model will generate an answer:")

# Input from user
question = st.text_input("Enter your question:")

if st.button("Generate Answer"):
    if question.strip():
        # Tokenize input (returns input_ids and attention_mask)
        inputs = tokenizer(question, return_tensors="pt")
        # Generate a response without tracking gradients
        with torch.no_grad():
            outputs = model.generate(**inputs, max_new_tokens=200, num_return_sequences=1)
        # Decode and display the output
        answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.write("**Answer:**", answer)
    else:
        st.write("Please enter a question to get an answer.")