anilbhatt1 committed (verified)
Commit 5a3b658 · Parent(s): 953179b

Update app.py

Files changed (1): app.py (+2 -1)
app.py CHANGED
@@ -201,6 +201,7 @@ import gradio as gr
 markdown_description = """
 - Jñāna is a Multimodal LLM app that can accept input as image, text or audio
 - Based on the input you can query the app for more details
+- Trained based on Llava 1.0 and Llava 1.5 papers
 - Uses **microsoft/phi-2 qlora** optimized model finetuned on **instruct150k** dataset
 - Uses **whisperX** model for audio
 """
@@ -213,6 +214,6 @@ demo = gr.Interface(fn=gradio_get_answers_fn,
                     outputs=gr.Textbox(label="Response"),
                     title="Jñāna - Phi2 Multiomodal Conversation Agent",
                     description=markdown_description,
-                    article=" **Credits** : https://theschoolof.ai/ || https://github.com/mshumer/gpt-llm-trainer || https://github.com/huggingface/peft/tree/main/examples/multilayer_perceptron ")
+                    article=" **Credits** : https://theschoolof.ai/ || https://arxiv.org/pdf/2304.08485.pdf || https://github.com/mshumer/gpt-llm-trainer || https://github.com/huggingface/peft/tree/main/examples/multilayer_perceptron ")
 
 demo.queue().launch(share=True)
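
For context, a minimal runnable sketch of how the two updated strings plug into the surrounding gr.Interface call. The gradio_get_answers_fn body and the input component list are hypothetical stand-ins, since they are defined outside the diffed hunks:

```python
import gradio as gr

# Description shown above the interface; matches the updated hunk,
# including the new Llava 1.0 / 1.5 bullet.
markdown_description = """
- Jñāna is a Multimodal LLM app that can accept input as image, text or audio
- Based on the input you can query the app for more details
- Trained based on Llava 1.0 and Llava 1.5 papers
- Uses **microsoft/phi-2 qlora** optimized model finetuned on **instruct150k** dataset
- Uses **whisperX** model for audio
"""

# Hypothetical stand-in: the real handler (image/text/audio -> LLM response)
# is defined elsewhere in app.py, outside this diff.
def gradio_get_answers_fn(image, text, audio):
    return f"image={image is not None}, text={text!r}, audio={audio is not None}"

demo = gr.Interface(
    fn=gradio_get_answers_fn,
    # Assumed input components, inferred from the app description above.
    inputs=[gr.Image(label="Image"), gr.Textbox(label="Text"), gr.Audio(label="Audio")],
    outputs=gr.Textbox(label="Response"),
    title="Jñāna - Phi2 Multiomodal Conversation Agent",
    description=markdown_description,
    # The article string gains the Llava paper link (arxiv 2304.08485) in this commit.
    article=" **Credits** : https://theschoolof.ai/ || https://arxiv.org/pdf/2304.08485.pdf || https://github.com/mshumer/gpt-llm-trainer || https://github.com/huggingface/peft/tree/main/examples/multilayer_perceptron ",
)

# Enable request queuing, then launch with a public share link.
demo.queue().launch(share=True)
```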