# falcon_180b_gradio.py
"""Minimal Gradio front-end for Falcon-180B text generation."""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer


def main():
    """Load Falcon-180B and serve a simple text-in/text-out Gradio UI.

    NOTE(review): Falcon-180B needs hundreds of GB of GPU memory; in practice
    you will want `device_map="auto"` and a reduced dtype — confirm against
    your hardware before deploying.
    """
    # Falcon models are published by TII under the "tiiuae" organization
    # (the previous "bigscience/falcon-180b" repo does not exist —
    # bigscience is the BLOOM organization).
    model_name = "tiiuae/falcon-180B"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    def generate_text(input_text):
        """Generate a continuation of `input_text` and return it as a string."""
        # Truncate overly long prompts so the model's context window is not exceeded.
        inputs = tokenizer(
            input_text, return_tensors="pt", max_length=512, truncation=True
        )
        # Use max_new_tokens (not max_length): max_length counts the prompt
        # tokens too, so a 512-token prompt would leave no room to generate.
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            min_length=50,
            num_return_sequences=1,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Simple text-in / text-out interface; blocks serving until interrupted.
    iface = gr.Interface(
        fn=generate_text,
        inputs="text",
        outputs="text",
        title="Falcon 180B Text Generator",
    )
    iface.launch()


if __name__ == "__main__":
    main()