lorocksUMD committed
Commit 12dcab4
1 parent: 14d2652

Update app.py

Files changed (1): app.py (+8 −1)
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 from transformers import AutoTokenizer
 from llava.model.language_model import *
+from llava.model.builder import load_pretrained_model
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -9,13 +10,19 @@ For more information on `huggingface_hub` Inference API support, please check th
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 model_path = "liuhaotian/llava-v1.6-mistral-7b"
-tokenizer = AutoTokenizer.from_pretrained(model_path)
+model_name = get_model_name_from_path(model_path)
+# tokenizer = AutoTokenizer.from_pretrained(model_path)
 # model = LlavaMistralForCausalLM.from_pretrained(
 #     model_path,
 #     low_cpu_mem_usage=True,
 #     # offload_folder="/content/sample_data"
 # )
 
+tokenizer, model, image_processor, context_len = load_pretrained_model(
+    model_path, None, model_name
+)
+
+
 def respond(
     message,
     history: list[tuple[str, str]],
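
Note that as committed, the added lines call `get_model_name_from_path` without importing it; in the LLaVA codebase that helper lives in `llava.mm_utils`, alongside `load_pretrained_model` in `llava.model.builder`. A minimal self-contained sketch of the loading step, assuming the LLaVA repository (https://github.com/haotian-liu/LLaVA) is installed and a CUDA device is available:

```python
# Sketch only: assumes the LLaVA package is installed from
# github.com/haotian-liu/LLaVA and a CUDA device is available.
from llava.mm_utils import get_model_name_from_path
from llava.model.builder import load_pretrained_model

model_path = "liuhaotian/llava-v1.6-mistral-7b"

# Derive the model name ("llava-v1.6-mistral-7b") from the checkpoint
# path; load_pretrained_model uses it to select the right model class.
model_name = get_model_name_from_path(model_path)

# Returns the tokenizer, the LLaVA model, the CLIP image processor, and
# the model's context length. The second argument (model_base) is None
# because this is a full checkpoint, not a LoRA/delta over a base model.
tokenizer, model, image_processor, context_len = load_pretrained_model(
    model_path, None, model_name
)
```

`load_pretrained_model` also accepts `load_8bit` and `load_4bit` flags, which can help fit the 7B checkpoint on smaller GPUs.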