File size: 13,360 Bytes
c88c1d9
1beaddf
c88c1d9
 
 
b95388b
c88c1d9
 
b95388b
 
c88c1d9
 
 
b95388b
 
 
c88c1d9
b95388b
c88c1d9
 
26f62c4
66b707b
26f62c4
b95388b
 
26f62c4
c88c1d9
26f62c4
c88c1d9
b95388b
 
 
 
 
a779e10
c88c1d9
b95388b
 
 
a779e10
 
c88c1d9
a779e10
c88c1d9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66b707b
c88c1d9
26f62c4
b95388b
26f62c4
 
 
b95388b
 
26f62c4
b95388b
c88c1d9
4e14d61
 
c88c1d9
66b707b
 
 
 
 
 
c88c1d9
66b707b
5094d0c
 
c88c1d9
5094d0c
 
66b707b
 
 
 
c88c1d9
 
 
 
 
 
 
 
 
 
 
 
 
 
66b707b
c88c1d9
 
 
66b707b
c88c1d9
66b707b
c88c1d9
 
 
 
 
 
 
 
 
 
 
 
 
 
66b707b
c88c1d9
 
66b707b
c88c1d9
66b707b
c88c1d9
66b707b
c88c1d9
 
 
 
66b707b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c88c1d9
b95388b
 
 
 
c88c1d9
26f62c4
b95388b
 
 
26f62c4
 
b95388b
26f62c4
 
b95388b
26f62c4
b95388b
 
 
26f62c4
b95388b
26f62c4
b95388b
 
 
 
26f62c4
 
 
b95388b
 
 
 
 
 
 
26f62c4
b95388b
 
 
26f62c4
b95388b
 
26f62c4
b95388b
 
 
 
 
 
 
26f62c4
 
b95388b
 
 
 
 
 
 
26f62c4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b95388b
26f62c4
 
 
 
 
 
 
 
 
 
 
 
b95388b
26f62c4
b95388b
 
26f62c4
 
 
e5d2292
c88c1d9
 
 
 
e5d2292
 
 
 
66b707b
 
e5d2292
 
66b707b
e5d2292
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66b707b
 
 
e5d2292
 
66b707b
e5d2292
 
 
 
66b707b
e5d2292
 
 
 
 
 
 
 
 
 
66b707b
e5d2292
 
 
 
 
 
 
 
66b707b
 
e5d2292
 
 
66b707b
 
 
 
 
 
e5d2292
66b707b
e5d2292
66b707b
e5d2292
 
 
66b707b
e5d2292
 
 
 
66b707b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
import io
import os
import json
import logging
import secrets

import gradio as gr
import numpy as np
import openai
import pandas as pd
from google.oauth2.service_account import Credentials
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload
from openai.embeddings_utils import distances_from_embeddings

from .gpt_processor import QuestionAnswerer
from .work_flow_controller import WorkFlowController

# Configure the module-wide OpenAI client from the environment; the key must
# be present before any embedding or chat-completion call below is made.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY


class Chatbot:
    """Document-grounded Q&A chatbot backed by OpenAI embeddings.

    A knowledge base (one row per document page, each with a precomputed
    embedding) is either kept session-local or merged into a shared CSV
    stored on Google Drive.  Questions are answered by retrieving the single
    closest page (cosine distance) and handing it to ``QuestionAnswerer``.
    """

    # Google Drive file id of the shared knowledge-base CSV.
    _DB_FILE_ID = "1m3ozrphHP221hhdCFMFX9-10nzSDfNyW"
    # Drive folder that per-session exports (__upload_file) are placed in.
    _UPLOAD_FOLDER_ID = "1Lp21EZlVlqL-c27VQBC6wTbUC1YpKMsG"

    def __init__(self) -> None:
        self.history = []  # list of [user_message, bot_message] pairs
        self.upload_state = "waiting"  # "waiting" until a knowledge base exists
        self.uid = self.__generate_uid()

        self.g_drive_service = self.__init_drive_service()
        self.knowledge_base = None  # pandas DataFrame once built
        self.context = None  # page text retrieved for the current question
        self.context_page_num = None
        self.context_file_name = None

    def build_knowledge_base(self, files, upload_mode="once"):
        """Process uploaded files and build the retrieval knowledge base.

        Args:
            files: uploaded file objects handed over from the Gradio UI.
            upload_mode: ``"Upload to Database"`` merges the result into the
                shared Drive CSV; any other value keeps it session-local.
        """
        work_flow_controller = WorkFlowController(files, self.uid)
        self.csv_result_path = work_flow_controller.csv_result_path
        self.json_result_path = work_flow_controller.json_result_path

        if upload_mode == "Upload to Database":
            self.__get_db_knowledge_base()
        else:
            self.__get_local_knowledge_base()

    def __get_db_knowledge_base(self):
        """Merge the session result into the shared Drive CSV and adopt it."""
        db = self.__read_db(self.g_drive_service)
        cur_content = pd.read_csv(self.csv_result_path)
        for _ in range(10):  # Drive uploads are flaky; retry a few times
            try:
                self.__write_into_db(self.g_drive_service, db, cur_content)
                break
            except Exception as e:
                logging.error(e)
                logging.error("Failed to upload to database, retrying...")
        else:
            # All retries exhausted; proceed with the local copy regardless
            # (same behavior as before, but the final failure is now logged).
            logging.error("Giving up on database upload after 10 attempts")
        # NOTE(review): unlike the local path, the "page_embedding" column is
        # not parsed back into numpy arrays here -- confirm the DB CSV stores
        # embeddings in a form usable by distances_from_embeddings.
        self.knowledge_base = db
        self.upload_state = "done"

    def __get_local_knowledge_base(self):
        """Load the session CSV and decode the embedding column."""
        with open(self.csv_result_path, "r", encoding="UTF-8") as fp:
            knowledge_base = pd.read_csv(fp)
        # Embeddings are serialized as Python-literal strings; eval() is
        # acceptable only because the CSV is produced by our own pipeline,
        # never by untrusted input.
        knowledge_base["page_embedding"] = (
            knowledge_base["page_embedding"].apply(eval).apply(np.array)
        )

        self.knowledge_base = knowledge_base
        self.upload_state = "done"

    def __write_into_db(self, service, db: pd.DataFrame, cur_content: pd.DataFrame):
        """Append ``cur_content`` to ``db`` and upload the merged CSV to Drive.

        Raises whatever the Drive client raises on failure (caller retries).
        """
        db = pd.concat([db, cur_content], ignore_index=True)
        # NOTE(review): this scratch file is never deleted afterwards.
        scratch_path = f"{self.uid}_knowledge_base.csv"
        db.to_csv(scratch_path, index=False)
        media = MediaFileUpload(scratch_path, resumable=True)
        service.files().update(
            fileId=self._DB_FILE_ID, media_body=media
        ).execute()

    def __init_drive_service(self):
        """Build an authenticated Drive v3 client from the CREDENTIALS env var.

        CREDENTIALS must hold the service-account JSON as a string.
        """
        SCOPES = ["https://www.googleapis.com/auth/drive"]
        SERVICE_ACCOUNT_INFO = os.getenv("CREDENTIALS")
        service_account_info_dict = json.loads(SERVICE_ACCOUNT_INFO)

        creds = Credentials.from_service_account_info(
            service_account_info_dict, scopes=SCOPES
        )

        return build("drive", "v3", credentials=creds)

    def __read_db(self, service) -> pd.DataFrame:
        """Download the shared knowledge-base CSV from Drive as a DataFrame."""
        request = service.files().get_media(fileId=self._DB_FILE_ID)
        fh = io.BytesIO()
        downloader = MediaIoBaseDownload(fh, request)

        done = False
        while not done:
            status, done = downloader.next_chunk()
            print(f"Download {int(status.progress() * 100)}%.")

        fh.seek(0)  # rewind before handing the buffer to pandas
        return pd.read_csv(fh)

    def __read_file(self, service, filename) -> pd.DataFrame:
        """Download the first Drive file whose name matches ``filename``.

        Raises IndexError when no file with that name exists.
        """
        # Bug fix: the query previously searched for a hard-coded literal
        # instead of interpolating the ``filename`` parameter.
        query = f"name='{filename}'"
        results = service.files().list(q=query).execute()
        files = results.get("files", [])

        file_id = files[0]["id"]

        request = service.files().get_media(fileId=file_id)
        fh = io.BytesIO()
        downloader = MediaIoBaseDownload(fh, request)

        done = False
        while not done:
            status, done = downloader.next_chunk()
            print(f"Download {int(status.progress() * 100)}%.")

        fh.seek(0)
        return pd.read_csv(fh)

    def __upload_file(self, service):
        """Upload this session's result CSV to the shared Drive folder."""
        # Debug listing of what the service account currently sees.
        results = service.files().list(pageSize=10).execute()
        items = results.get("files", [])
        if not items:
            print("No files found.")
        else:
            print("Files:")
            for item in items:
                print(f"{item['name']} ({item['id']})")

        media = MediaFileUpload(self.csv_result_path, resumable=True)
        filename_prefix = "ex_bot_database_"
        filename = filename_prefix + self.uid + ".csv"
        service.files().create(
            media_body=media,
            body={
                "name": filename,
                # Optional: places the file in a specific Drive folder.
                "parents": [self._UPLOAD_FOLDER_ID],
            },
        ).execute()

    def clear_state(self):
        """Reset all per-session state back to a fresh, pre-upload chatbot."""
        self.context = None
        self.context_page_num = None
        self.context_file_name = None
        self.knowledge_base = None
        self.upload_state = "waiting"
        self.history = []

    def send_system_nofification(self):
        """Return a one-turn conversation describing the processing state.

        (Method name keeps its original spelling -- it is public interface.)
        Returns None for any state other than "waiting"/"done".
        """
        if self.upload_state == "waiting":
            conversation = [["已上傳文件", "文件處理中(摘要、翻譯等),結束後將自動回覆"]]
            return conversation
        elif self.upload_state == "done":
            conversation = [["已上傳文件", "文件處理完成,請開始提問"]]
            return conversation

    def change_md(self):
        """Return a Gradio update that reveals the per-file summary markdown."""
        content = self.__construct_summary()
        return gr.Markdown.update(content, visible=True)

    def __construct_summary(self):
        """Build a markdown digest (name, page count, summary) per file."""
        with open(self.json_result_path, "r", encoding="UTF-8") as fp:
            knowledge_base = json.load(fp)

        context = """"""
        for key in knowledge_base.keys():
            file_name = knowledge_base[key]["file_name"]
            total_page = knowledge_base[key]["total_pages"]
            summary = knowledge_base[key]["summarized_content"]
            file_context = f"""
                ### 文件摘要
                {file_name}  (共 {total_page} 頁)<br><br>
                {summary}<br><br>
            """
            context += file_context
        return context

    def user(self, message):
        """Append the user's message (with a pending reply) to history.

        Returns ("", history): the empty string clears the Gradio textbox.
        """
        self.history += [[message, None]]
        return "", self.history

    def bot(self):
        """Produce the bot reply for the newest user message in history."""
        user_message = self.history[-1][0]
        print(f"user_message: {user_message}")

        if self.knowledge_base is None:
            # Nothing uploaded yet: replace history with a usage hint.
            response = [
                [user_message, "請先上傳文件"],
            ]
            self.history = response
            return self.history

        self.__get_index_file(user_message)
        if self.context is None:
            # No page was close enough to the question.
            response = [
                [user_message, "無法找到相關文件,請重新提問"],
            ]
            self.history = response
            return self.history

        qa_processor = QuestionAnswerer()
        bot_message = qa_processor.answer_question(
            self.context,
            self.context_page_num,
            self.context_file_name,
            self.history,
        )
        print(f"bot_message: {bot_message}")
        response = [
            [user_message, bot_message],
        ]
        self.history[-1] = response[0]
        return self.history

    def __get_index_file(self, user_message):
        """Select the knowledge-base page closest to the question.

        Sets ``self.context`` (plus page number and file name) to the best
        match, or ``self.context = None`` when even the best cosine distance
        exceeds 0.2.  NOTE(review): on a miss, page_num/file_name keep stale
        values from the previous hit -- callers must check ``context`` first.
        """
        user_message_embedding = openai.Embedding.create(
            input=user_message, engine="text-embedding-ada-002"
        )["data"][0]["embedding"]

        self.knowledge_base["distance"] = distances_from_embeddings(
            user_message_embedding,
            self.knowledge_base["page_embedding"].values,
            distance_metric="cosine",
        )
        self.knowledge_base = self.knowledge_base.sort_values(
            by="distance", ascending=True
        )

        # 0.2 is an empirical relevance cut-off for ada-002 cosine distances.
        if self.knowledge_base["distance"].values[0] > 0.2:
            self.context = None
        else:
            self.context = self.knowledge_base["page_content"].values[0]
            self.context_page_num = self.knowledge_base["page_num"].values[0]
            self.context_file_name = self.knowledge_base["file_name"].values[0]

    def __generate_uid(self):
        """Return a 16-hex-char session id using crypto-strength randomness."""
        return secrets.token_hex(8)


class VideoChatbot:
    """Q&A over a single hard-coded YouTube video's metadata.

    The question is matched (by embedding distance) against the metadata
    fields -- title, transcript, summary, keywords -- and the best-matching
    field's text becomes the context for a gpt-3.5-turbo completion.
    """

    def __init__(self) -> None:
        self.metadata_keys = ["標題", "逐字稿", "摘要", "關鍵字"]
        self.metadata = {
            "c2fK-hxnPSY": {
                "標題": "可汗學院的創新教學:學生與老師模式解析",
                "逐字稿": "0:00\n這裡是一個關於西班牙美洲戰爭和AP美國歷史的練習\n0:04\n在可汗學院,我們以學生模式開始,並注意到如果學生要求解釋\n0:11\n它不只是給出答案,它會像一個好的導師一樣,只是試圖引導\n0:15\n學生朝正確的方向前進,並且還注意到老師可以看到\n0:21\n學生正在互動的內容作為安全措施,現在如果我們關閉學生模式,我們\n0:27\n進入老師模式,我們看到當老師要求解釋時,它非常不同,就像\n0:32\n有了老師的指南,它會給出如你所見的非常詳細的解釋,如果老師\n0:39\n想要它的教案,他們只需要要求,他們就會得到一個非常詳細的\n0:44\n教案,包括目標、活動和家庭作業要做的事情,然後如果老師\n0:52\n說太好了,Khanmigo,你說給一個講義或者作為家庭作業給一個反思\n0:58\n實際上給了反思作業,然後它會再次為老師構建那個\n1:03\n如果老師喜歡,他們可以要求自定義這些教案或這些提示或者這些\n1:08\n反思,讓它們更符合他們的學生正在做的事情,這是老師們通常花費\n1:13\n每天好幾個小時工作的事情,我們希望能夠節省\n1:17\n他們很多時間和精力,以利他們自己的健康和他們的學生。",
                "摘要": "這段文字描述了一個關於西班牙美洲戰爭和AP美國歷史的教學練習。練習首先展示學生模式,強調良好的教導方式並提到教師可以監控學生互動情況作為安全措施。隨後,進入老師模式,提供了詳細的解釋和教案,包括目標、活動和家庭作業。另外,還有一個自定義教案的選項,使其更符合學生的需求。整個過程旨在節省教師的時間和精力,並有助於他們的健康和學生的學習。",
                "關鍵字": ["AP美國歷史", "學生模式", "老師模式", "教案設計", "自定義教學"],
            }
        }

    def answer_question(self, user_message):
        """Answer ``user_message`` using the video field closest to it.

        Returns the model's answer, a fallback message when no field is
        similar enough, or None when the chat completion itself fails.
        """
        self.video_id = "c2fK-hxnPSY"  # only one video is supported for now

        index = self.compute_similariy(user_message)
        if index is None:
            return "無法找到相關資訊,請重新提問"

        context = self.metadata[self.video_id][index]

        system_prompt = """
            你是一個知識檢索系統,我會給你一份文件,請幫我依照文件內容回答問題,並用繁體中文回答。以下是文件內容
        """
        # Bug fix: the prompt previously contained the literal characters
        # "+ '\n'" between the instructions and the context instead of an
        # actual newline separator.
        messages = [
            {"role": "system", "content": f"{system_prompt}\n'''{context}'''"},
            {"role": "user", "content": user_message},
        ]
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=messages,
                temperature=1,
                max_tokens=2048,
                frequency_penalty=0,
                presence_penalty=0.6,
            )
            return response["choices"][0]["message"]["content"]
        except Exception as e:
            logging.error(e)
            logging.error("Failed to answer question")
            return None  # explicit: callers get None on API failure

    def compute_similariy(self, user_message):
        """Return the metadata key most similar to the question, or None.

        (Method name keeps its original spelling -- it is public interface.)
        A candidate is accepted only when its cosine distance to the
        question embedding is at most ``threshold``.
        """
        threshold = 0.5  # maximum cosine distance considered relevant

        user_message_embedding = openai.Embedding.create(
            input=user_message, engine="text-embedding-ada-002"
        )["data"][0]["embedding"]

        field_embeddings = {}
        for key in self.metadata_keys:
            field_embeddings[key] = openai.Embedding.create(
                input=self.metadata[self.video_id][key],
                engine="text-embedding-ada-002",
            )["data"][0]["embedding"]

        # Bug fix: previously only the FIRST field's embedding was put into
        # the dataframe, so every question was compared against the title
        # alone; now all metadata fields participate in retrieval.
        candidates = pd.DataFrame(
            {
                "title": list(field_embeddings.keys()),
                "embedding": list(field_embeddings.values()),
            }
        )

        candidates["distance"] = distances_from_embeddings(
            user_message_embedding,
            candidates["embedding"].values,
            distance_metric="cosine",
        )

        candidates = candidates.sort_values(by="distance", ascending=True)

        if candidates["distance"].values[0] > threshold:
            return None
        # Bug fix: positional access -- plain ``["title"][0]`` indexes by the
        # original label and could return the wrong row after sorting.
        return candidates["title"].values[0]