import io

import gradio as gr
import requests
from huggingface_hub import InferenceClient
from PIL import Image
from PyPDF2 import PdfReader
from transformers import pipeline

# Summarization pipeline fine-tuned for resume text.
pipe = pipeline("text2text-generation", model="asach/simpleT5-resume-summarization")

my_key = "YOUR_HUGGING_FACE_API_KEY"
client = InferenceClient(api_key=my_key)


def process_pdf(pdf_file):
    # Extract text from every page of the uploaded PDF.
    # extract_text() can return None for image-only pages, so fall back to "".
    reader = PdfReader(pdf_file)
    text = ""
    for page in reader.pages:
        text += page.extract_text() or ""

    # Summarize the extracted resume text.
    summary = pipe(text, max_length=150, min_length=30)[0]["generated_text"]

    agent_desc = """
    You are an AI agent that helps a user generate a prompt to feed into an
    AI image generation model, based on a summary of their resume. The image
    should depict a rabbit within the career field related to the summary.
    Encapsulate the image prompt between two '---' marks.
    """

    messages = [
        {"role": "system", "content": agent_desc},
        {"role": "user", "content": summary},
    ]

    # Stream the chat completion and accumulate the generated text.
    response_text = ""
    stream = client.chat.completions.create(
        model="meta-llama/Llama-3.2-3B-Instruct",
        messages=messages,
        max_tokens=700,
        stream=True,
    )
    for chunk in stream:
        # Streamed deltas may be None (e.g., on the final chunk), so guard.
        response_text += chunk.choices[0].delta.content or ""

    # The image prompt is the text between the two '---' markers; fall back
    # to the whole response if the model didn't follow the format.
    parts = response_text.split("---")
    image_prompt = parts[1].strip() if len(parts) > 1 else response_text.strip()

    # Query the hosted Stable Diffusion inference endpoint for an image.
    API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
    headers = {"Authorization": f"Bearer {my_key}"}

    def query(payload):
        response = requests.post(API_URL, headers=headers, json=payload)
        response.raise_for_status()
        return response.content

    image_bytes = query({"inputs": image_prompt})
    image = Image.open(io.BytesIO(image_bytes))

    return summary, image


# gr.inputs / gr.outputs were removed in Gradio 3.x; use the component
# classes directly. type="filepath" hands process_pdf a plain path string.
pdf_input = gr.File(label="Upload PDF Resume", type="filepath")
summary_output = gr.Textbox(label="Resume Summary")
image_output = gr.Image(label="Generated Image")

gr.Interface(
    fn=process_pdf,
    inputs=pdf_input,
    outputs=[summary_output, image_output],
    title="Resume Summarization and Image Generation",
    description="Upload your PDF resume to get a summary and a related image of a rabbit.",
    allow_flagging="never",
).launch()