rbughao committed
Commit 663fbef (verified) · Parent: c991a93

Update app.py

Files changed (1): app.py +6 -47
app.py CHANGED
@@ -1,10 +1,10 @@
  #for learning
  import os
- #import openai
+ import openai
  import gradio as gr

- #openai.api_key = os.environ.get('O_APIKey')
- HF_Token = os.environ.get('HF_Token')
+ openai.api_key = os.environ.get('O_APIKey')
+ #HF_Token = os.environ.get('HF_Token')
  Data_Read = os.environ.get('Data_Reader')
  ChurnData = os.environ.get('Churn_Data')
  ChurnData2 = os.environ.get('Churn_Data2')
@@ -12,47 +12,6 @@ ChurnData2 = os.environ.get('Churn_Data2')
  #read data
  from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SummaryIndex, download_loader

- #new
- from huggingface_hub import login
- import json
- login(token=HF_Token)
-
- repo_id = "microsoft/Phi-3-mini-4k-instruct"
-
- llm_client = inferenceClient(
-     model = repo_id,
-     timeout = 120,
- )
-
- def reply(inference_client: InferenceClient, prompt: str):
-     response = inference_client.post(
-         json={
-             "inputs":prompt,
-             "parameters":{"max_new_tokens":200},
-             "tasks":"text-generation",
-         },
-     )
-     answer = json.loads(response.decode())[0]["generated_text"]
-     return answer
-
- #from llama_index.llms.ollama import Ollama
- #from llama_index.embeddings.huggingface import HuggingFaceEmbedding
- #from llama_index.core import Settings
-
- #Settings.llm = Ollama(model="distilbert", request_timeout=120.0)
- #Settings.embed_model = HuggingFaceEmbedding(
- #    model_name="distilbert/distilgpt2"
- ##model_name="meta-llama/Llama-2-7b-chat-hf"
- #)
-
- #from transformers import pipeline
- #pipe = pipeline("text-generation", model="distilbert/distilgpt2")
- #from transformers import AutoTokenizer, AutoModelForCausalLM
- #tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
- #model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
-
- #new
-
  DataReader = download_loader(Data_Read)
  loader = DataReader()

@@ -69,9 +28,9 @@ documents = documents + documents2
  index = VectorStoreIndex.from_documents(documents)
  query_engine = index.as_query_engine()

- #def reply(message, history):
- #    answer = str(query_engine.query(message))
- #    return answer
+ def reply(message, history):
+     answer = str(query_engine.query(message))
+     return answer

  Conversing = gr.ChatInterface(reply, chatbot=gr.Chatbot(height="70vh"), retry_btn=None, theme=gr.themes.Monochrome(),
                                title = 'BT Accor Q&A', undo_btn = None, clear_btn = None, css='footer {visibility: hidden}').launch()
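Review note on the removed Hugging Face path: as committed it could never have run, since the client class is never imported and is instantiated as `inferenceClient` rather than `InferenceClient` (a NameError at import time). A minimal corrected sketch of the same idea, using the documented `InferenceClient.text_generation` helper in place of the hand-rolled `.post()` payload (same model and `HF_Token` secret as the removed code):

import os
from huggingface_hub import InferenceClient

# Same model the removed code targeted, with the token passed explicitly
# instead of a global login().
client = InferenceClient(
    model="microsoft/Phi-3-mini-4k-instruct",
    token=os.environ.get("HF_Token"),
    timeout=120,
)

def hf_reply(prompt: str) -> str:
    # text_generation does the request/decode work the removed code
    # did by hand with .post() and json.loads().
    return client.text_generation(prompt, max_new_tokens=200)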
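What the commit lands on is a llama_index query engine serving a Gradio chat. A self-contained sketch of that end state, with an illustrative local data/ folder standing in for the loaders and paths the real app pulls from env-var secrets:

import gradio as gr
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

# Illustrative stand-in for the secret-configured loaders in app.py.
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()  # uses the default (OpenAI) LLM

def reply(message, history):
    # gr.ChatInterface calls fn(message, history); history is unused here,
    # so every turn is answered statelessly from the index.
    return str(query_engine.query(message))

gr.ChatInterface(reply, chatbot=gr.Chatbot(height="70vh")).launch()

Because `history` is ignored, follow-up questions get no conversational context; `index.as_chat_engine()` would be the stateful alternative.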
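One caveat on the re-enabled OpenAI wiring: recent llama_index releases resolve the key from the OPENAI_API_KEY environment variable (or an explicit argument), so the module-level `openai.api_key` assignment may not be picked up. If queries fail to authenticate, the unambiguous route is to hand the `O_APIKey` secret to the LLM through `Settings`; a sketch, assuming the llama-index-llms-openai package is installed:

import os
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI

# Pass the key straight to the LLM rather than relying on the
# legacy module-level openai.api_key attribute.
Settings.llm = OpenAI(api_key=os.environ.get("O_APIKey"))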
 