kengui committed on
Commit
e494b71
·
verified ·
1 Parent(s): 7bc8408

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -72
app.py CHANGED
@@ -1,83 +1,68 @@
1
- # -*- coding: utf-8 -*-
2
- """Untitled1.ipynb
3
-
4
- Automatically generated by Colab.
5
-
6
- Original file is located at
7
- https://colab.research.google.com/drive/1Oyv-OC4NLyS4SOfffFLwdI0wqvX7K_q2
8
- """
9
-
10
  import gradio as gr
11
  from transformers import pipeline
12
  from PyPDF2 import PdfReader
13
  from huggingface_hub import InferenceClient
14
- from google.colab import userdata
15
  import requests
16
  from PIL import Image
17
  import io
18
 
19
  pipe = pipeline("text2text-generation", model="asach/simpleT5-resume-summarization")
20
-
21
- reader = PdfReader("/KennethGuillont.pdf")
22
- text = ""
23
- for page in reader.pages:
24
- text += page.extract_text()
25
-
26
- summary = pipe(text, max_length=150, min_length=30)[0]['generated_text']
27
- summary
28
-
29
- my_key = userdata.get('HF')
30
-
31
  client = InferenceClient(api_key=my_key)
32
 
33
- model_name= 'meta-llama/Llama-3.2-3B-Instruct'
34
-
35
- agent_desc = """
36
- You are an AI agent helps a user generate a prompt to feed into an AI image
37
- generation model based on a summary of their resume given to you. The image should depict a rabbit
38
- within the the career feild related to the summary. encase the image prompt between
39
- two '---\n' marks, to separate it from the rest of the text.
40
- """
41
-
42
- print(summary)
43
-
44
- messages = [
45
- {"role": "user", "content": agent_desc},
46
- {"role": "user", "content": summary}
47
- ]
48
-
49
- stream = client.chat.completions.create(
50
- model=model_name,
51
- messages=messages,
52
- max_tokens=700,
53
- stream=True
54
- )
55
-
56
- response_text =""
57
-
58
- for chunk in stream:
59
- response_text += chunk.choices[0].delta.content
60
-
61
- print(response_text)
62
-
63
- print(response_text.replace('.','.\n'))
64
-
65
- image_prompt = response_text.split('---\n')[1]
66
- image_prompt
67
-
68
- API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
69
- headers = {"Authorization": f"Bearer {my_key}"}
70
-
71
- def query(payload):
72
- response = requests.post(API_URL, headers=headers, json=payload)
73
- return response.content
74
-
75
- image_bytes = query({
76
- "inputs": image_prompt,
77
- })
78
- # You can access the image with PIL.Image for example
79
- import io
80
- from PIL import Image
81
- image = Image.open(io.BytesIO(image_bytes))
82
-
83
- image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import io
import os

import gradio as gr
import requests
from huggingface_hub import InferenceClient
from PIL import Image
from PyPDF2 import PdfReader
from transformers import pipeline

# Summarization model used to condense the raw resume text.
pipe = pipeline("text2text-generation", model="asach/simpleT5-resume-summarization")

# Read the Hugging Face API token from the environment (the standard practice on
# HF Spaces is a secret named HF_TOKEN) instead of hard-coding it in source.
# The placeholder fallback keeps the app importable without a token; API calls
# will then fail with an auth error rather than leaking a real key in the repo.
my_key = os.environ.get("HF_TOKEN", "YOUR_HUGGING_FACE_API_KEY")
 
 
 
 
 
 
 
 
 
 
# Hugging Face Inference API client used for the chat-completion call inside
# process_pdf; authenticated with the module-level token `my_key`.
client = InferenceClient(api_key=my_key)
+ def process_pdf(pdf_file):
14
+ reader = PdfReader(pdf_file.name)
15
+ text = ""
16
+ for page in reader.pages:
17
+ text += page.extract_text()
18
+
19
+ summary = pipe(text, max_length=150, min_length=30)[0]['generated_text']
20
+ agent_desc = """
21
+ You are an AI agent helps a user generate a prompt to feed into an AI image
22
+ generation model based on a summary of their resume given to you. The image should depict a rabbit
23
+ within the the career field related to the summary. Encapsulate the image prompt between
24
+ two '---' marks.
25
+ """
26
+
27
+ messages = [
28
+ {"role": "user", "content": agent_desc},
29
+ {"role": "user", "content": summary}
30
+ ]
31
+
32
+ response_text = ""
33
+ stream = client.chat.completions.create(
34
+ model='meta-llama/Llama-3.2-3B-Instruct',
35
+ messages=messages,
36
+ max_tokens=700,
37
+ stream=True
38
+ )
39
+
40
+ for chunk in stream:
41
+ response_text += chunk.choices[0].delta.content
42
+
43
+ image_prompt = response_text.split('---')[1].strip()
44
+
45
+ API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
46
+ headers = {"Authorization": f"Bearer {my_key}"}
47
+
48
+ def query(payload):
49
+ response = requests.post(API_URL, headers=headers, json=payload)
50
+ return response.content
51
+
52
+ image_bytes = query({"inputs": image_prompt})
53
+ image = Image.open(io.BytesIO(image_bytes))
54
+
55
+ return summary, image
56
+
# NOTE: the `gr.inputs` / `gr.outputs` namespaces were deprecated in Gradio 2.x
# and removed in Gradio 3+/4 — `gr.inputs.File(...)` raises AttributeError on
# any current release. The top-level component classes are the supported API.
pdf_input = gr.File(label="Upload PDF Resume")
summary_output = gr.Textbox(label="Resume Summary")
image_output = gr.Image(label="Generated Image")

# Wire the PDF -> (summary, image) pipeline into a simple web UI and serve it.
gr.Interface(
    fn=process_pdf,
    inputs=pdf_input,
    outputs=[summary_output, image_output],
    title="Resume Summarization and Image Generation",
    description="Upload your PDF resume to get a summary and a related image of a rabbit.",
    allow_flagging="never"
).launch()