prasadmahajan21 committed on
Commit 33c23f6 • 1 Parent(s): f2af489

Upload 4 files

Files changed (4)
  1. README.md +8 -4
  2. app.py +104 -147
  3. gitattributes +35 -0
  4. requirements.txt +14 -6
README.md CHANGED
@@ -1,12 +1,16 @@
 ---
-title: LMM For Better Captioning
-emoji: 🖼
+title: Share Captioner
+emoji: 🍃
 colorFrom: purple
-colorTo: red
+colorTo: green
 sdk: gradio
-sdk_version: 5.0.1
+sdk_version: 4.36.1
 app_file: app.py
 pinned: false
+license: apache-2.0
 ---
 
+**Paper or resources for more information:**
+[[Project](https://ShareGPT4V.github.io/)] [[Paper](https://huggingface.co/papers/2311.12793)] [[Code](https://github.com/ShareGPT4Omni/ShareGPT4V)]
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
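The YAML block above is the Spaces front matter the Hub parses to configure the Space (title, emoji, theme colors, SDK pin, license). A minimal sketch of reading it programmatically, assuming a hypothetical repo id for this Space:

```python
# A sketch, not part of this commit. "prasadmahajan21/share-captioner" is a
# hypothetical Space id; substitute the real one.
from huggingface_hub import RepoCard

card = RepoCard.load("prasadmahajan21/share-captioner", repo_type="space")
print(card.data.to_dict())  # {'title': 'Share Captioner', 'emoji': '🍃', 'sdk': 'gradio', ...}
```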
app.py CHANGED
@@ -1,154 +1,111 @@
 import gradio as gr
-import numpy as np
-import random
-
-# import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
 import torch
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-
-# @spaces.GPU #[uncomment to use ZeroGPU]
-def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
-):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
-
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 640px;
+import spaces
+from PIL import Image
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_name = "Lin-Chen/ShareCaptioner"
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(
+    model_name, device_map="cpu", torch_dtype=torch.float16, trust_remote_code=True).eval()
+model.tokenizer = tokenizer
+
+model.cuda()
+
+seg1 = '<|User|>:'
+seg2 = f'Analyze the image in a comprehensive and detailed manner.{model.eoh}\n<|Bot|>:'
+seg_emb1 = model.encode_text(seg1, add_special_tokens=True).cuda()
+seg_emb2 = model.encode_text(seg2, add_special_tokens=False).cuda()
+
+
+@spaces.GPU
+def detailed_caption(img_path):
+    subs = []
+    image = Image.open(img_path).convert("RGB")
+    subs.append(model.vis_processor(image).unsqueeze(0))
+
+    subs = torch.cat(subs, dim=0).cuda()
+    tmp_bs = subs.shape[0]
+    tmp_seg_emb1 = seg_emb1.repeat(tmp_bs, 1, 1)
+    tmp_seg_emb2 = seg_emb2.repeat(tmp_bs, 1, 1)
+    with torch.cuda.amp.autocast():
+        with torch.no_grad():
+            subs = model.encode_img(subs)
+            input_emb = torch.cat([tmp_seg_emb1, subs, tmp_seg_emb2], dim=1)
+            out_embeds = model.internlm_model.generate(inputs_embeds=input_emb,
+                                                       max_length=500,
+                                                       num_beams=3,
+                                                       min_length=1,
+                                                       do_sample=True,
+                                                       repetition_penalty=1.5,
+                                                       length_penalty=1.0,
+                                                       temperature=1.,
+                                                       eos_token_id=model.tokenizer.eos_token_id,
+                                                       num_return_sequences=1,
+                                                       )
+
+    return model.decode_text([out_embeds[0]])
+
+
+block_css = """
+#buttons button {
+    min-width: min(120px,100%);
 }
 """
-
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Gradio Template")
+title_markdown = ("""
+# 🐬 ShareGPT4V: Improving Large Multi-modal Models with Better Captions
+[[Project Page](https://sharegpt4v.github.io/)] [[Code](https://github.com/ShareGPT4Omni/ShareGPT4V)] | [[Paper](https://github.com/InternLM/InternLM-XComposer/blob/main/projects/ShareGPT4V/ShareGPT4V.pdf)]
+""")
+tos_markdown = ("""
+### Terms of use
+By using this service, users are required to agree to the following terms:
+The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.
+For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
+""")
+learn_more_markdown = ("""
+### License
+The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
+""")
+ack_markdown = ("""
+### Acknowledgement
+The template for this web demo is from [LLaVA](https://github.com/haotian-liu/LLaVA), and we are very grateful to LLaVA for their open source contributions to the community!
+""")
+
+
+def build_demo():
+    with gr.Blocks(title="Share-Captioner", theme=gr.themes.Default(), css=block_css) as demo:
+        gr.Markdown(title_markdown)
 
         with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=2,  # Replace with defaults that work for your model
-                )
-
-        gr.Examples(examples=examples, inputs=[prompt])
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
-    )
-
-if __name__ == "__main__":
+            with gr.Column(scale=5):
+                with gr.Row(elem_id="Model ID"):
+                    gr.Dropdown(
+                        choices=['Share-Captioner'],
+                        value='Share-Captioner',
+                        interactive=True,
+                        label='Model ID',
+                        container=False)
+                img_path = gr.Image(label="Image", type="filepath")
+            with gr.Column(scale=8):
+                with gr.Row():
+                    caption = gr.Textbox(label='Caption')
+                with gr.Row():
+                    submit_btn = gr.Button(
+                        value="🚀 Generate", variant="primary")
+                    regenerate_btn = gr.Button(value="🔄 Regenerate")
+
+        gr.Markdown(tos_markdown)
+        gr.Markdown(learn_more_markdown)
+        gr.Markdown(ack_markdown)
+
+        submit_btn.click(detailed_caption, inputs=[
+                         img_path], outputs=[caption])
+        regenerate_btn.click(detailed_caption, inputs=[
+                             img_path], outputs=[caption])
+
+    return demo
+
+
+if __name__ == '__main__':
+    demo = build_demo()
     demo.launch()
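For reference, a minimal sketch of exercising the new captioning path without the Gradio UI, assuming the pinned dependencies are installed and a CUDA GPU is present (the model is loaded and moved to the GPU at import time); "photo.jpg" is a hypothetical local image path:

```python
# A sketch, not part of this commit: importing app downloads the ShareCaptioner
# weights and runs the module-level GPU setup, after which the handler can be
# called directly.
from app import detailed_caption

print(detailed_caption("photo.jpg"))  # prints the generated detailed caption
```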
gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
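These are the stock Hugging Face `.gitattributes` rules routing binary artifacts through Git LFS. A rough sketch of checking which rule a filename would match, assuming a local copy of the file above (`fnmatch` only approximates git's own pattern semantics, e.g. for `saved_model/**/*`):

```python
# A sketch, not part of this commit: list the LFS patterns a given
# filename would hit.
from fnmatch import fnmatch

with open("gitattributes") as f:
    patterns = [line.split()[0] for line in f if "filter=lfs" in line]

print([p for p in patterns if fnmatch("pytorch_model.bin", p)])  # ['*.bin']
```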
requirements.txt CHANGED
@@ -1,6 +1,14 @@
-accelerate
-diffusers
-invisible_watermark
-torch
-transformers
-xformers
+transformers==4.32.0
+accelerate==0.24.0
+tiktoken==0.5.1
+einops==0.7.0
+transformers_stream_generator==0.0.4
+scipy==1.11.3
+torch==2.1.2
+torchvision==0.16.2
+pillow==10.0.1
+matplotlib==3.8.0
+sentencepiece
+urllib3==1.26.18
+timm==1.0.3
+spaces
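Note that `gradio` itself is not pinned here; on Spaces it is supplied by the platform according to `sdk_version` in README.md. A minimal sketch for checking a local environment against these pins:

```python
# A sketch, not part of this commit: compare installed versions against the
# "==" pins in requirements.txt (assumed to be in the current directory).
from importlib.metadata import PackageNotFoundError, version

with open("requirements.txt") as f:
    pins = [line.strip().split("==") for line in f if "==" in line]

for name, pinned in pins:
    try:
        status = "ok" if version(name) == pinned else f"got {version(name)}"
    except PackageNotFoundError:
        status = "missing"
    print(f"{name}=={pinned}: {status}")
```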