"""Gradio demo that serves the tiiuae/falcon-40b-instruct text-generation model."""

import logging

import gradio as gr
from transformers import AutoConfig, pipeline


# Temporary workaround: some library code calls Logger.warning_once, which is
# not part of the stdlib logging.Logger API.  Patch it in so those calls do
# not raise AttributeError.
def warning_once(self, msg, *args, **kwargs):
    """Emit *msg* as a warning only the first time it is seen by this process."""
    if msg not in self._warned:
        self.warning(msg, *args, **kwargs)
        self._warned.add(msg)


logging.Logger.warning_once = warning_once
# Class-level set shared by all loggers on purpose: "once" means once per process.
logging.Logger._warned = set()

# Load the model configuration with trust_remote_code to execute the
# repository's custom configuration code (required by Falcon's architecture).
config = AutoConfig.from_pretrained('tiiuae/falcon-40b-instruct', trust_remote_code=True)

# Load the model once when the script starts using the loaded config.
# trust_remote_code is needed here too: the Falcon checkpoint ships its own
# modeling code, and loading the weights executes it just like the config did.
generator = pipeline('text-generation', model='tiiuae/falcon-40b-instruct',
                     config=config, trust_remote_code=True)


def generate_text(prompt):
    """Return text generated from *prompt*, capped at 100 total tokens."""
    # Use the preloaded model; [0] takes the first (only) returned sequence.
    return generator(prompt, max_length=100)[0]['generated_text']


def main():
    """Build and launch the Gradio UI for interactive text generation."""
    with gr.Blocks() as demo:
        gr.Markdown("## Text Generation Model")
        gr.Markdown("This model generates text based on the input prompt. Powered by Hugging Face transformers.")
        prompt = gr.Textbox(lines=2, placeholder="Type your prompt here...")
        output = gr.Text(label="Generated Text")
        # NOTE(review): `change` fires on every keystroke, so each typed
        # character triggers a full generation pass on a 40B model; consider
        # `prompt.submit(...)` or a gr.Button instead.
        prompt.change(fn=generate_text, inputs=prompt, outputs=output)
    demo.launch()


if __name__ == "__main__":
    main()