handle ratelimit
- __pycache__/utils.cpython-310.pyc +0 -0
- utils.py +25 -12
__pycache__/utils.cpython-310.pyc
CHANGED
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
utils.py
CHANGED
@@ -3,6 +3,7 @@ import torch
 from skinmodels import IsSkinResnet, IsHealthySkinResnet, SkinDiseaseModelResnet
 from config import read_disease_step, read_label_decode, transform_img
 import openai
+import time
 from config_path import SKINMODEL1_PATH, SKINMODEL2_PATH, SKINMODEL3_PATH, SKINDISEASE_STEP_PATH, SKINLABEL_DECODE_PATH
 
 def diagnosis(img):
@@ -48,22 +49,34 @@ def diagnosis(img):
 
     return label_decode[disease], disease_to_link[label_decode[disease]], disease_to_step[label_decode[disease]]
 
-def getMessage(msg):
-    openRespon = openai.ChatCompletion.create(
-        model="gpt-3.5-turbo",
-        messages=msg,
-        temperature=0.5
-    )
-    reply = openRespon.choices[0].message.content
-    return reply
 
-def cekApikey():
+def getMessage(msg):
     try:
         openRespon = openai.ChatCompletion.create(
-
-            messages=
+            model="gpt-3.5-turbo",
+            messages=msg,
             temperature=0.5
         )
-        return True
+        reply = openRespon.choices[0].message.content
+        return reply
+    except openai.error.RateLimitError:
+        return "Dokter saat ini sedang sibuk, tolong tunggu beberapa saat lagi"
     except:
+        return "Maaf, sepertinya dokter tidak bisa menjawab saat ini. Silahkan refresh halaman. Jika masalah masih terus berlanjut, silahkan hubungi admin"
+
+last_apikey_check = int(round(time.time()*1000))-35*1000
+def cekApikey():
+    try:
+        global last_apikey_check
+        t_stamp = int(round(time.time())*1000)
+        if((t_stamp-last_apikey_check) > 30*1000):
+            openRespon = openai.ChatCompletion.create(
+                model="gpt-3.5-turbo",
+                messages=[{"role" : "user", "content": "i"}],
+                temperature=0.5
+            )
+        last_apikey_check = t_stamp
+        return True
+    except Exception as err:
+        print(err)
         return False
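For reference, the Indonesian strings in the new error handling translate roughly as: "Dokter saat ini sedang sibuk, tolong tunggu beberapa saat lagi" = "The doctor is currently busy, please wait a moment", and "Maaf, sepertinya dokter tidak bisa menjawab saat ini. Silahkan refresh halaman. Jika masalah masih terus berlanjut, silahkan hubungi admin" = "Sorry, it seems the doctor cannot answer right now. Please refresh the page. If the problem persists, please contact the admin". The sketch below is not part of the commit; it only illustrates how a caller might use the changed functions, assuming utils.py and its model/config dependencies import cleanly, that openai.api_key is configured (for example via the OPENAI_API_KEY environment variable), and that the example conversation is invented.

# Hypothetical usage sketch (not part of this commit).
from utils import getMessage, cekApikey

# cekApikey() probes the API at most once every 30 seconds (the module-level
# last_apikey_check timestamp throttles repeat checks) and returns True/False.
if cekApikey():
    # getMessage() takes a ChatCompletion-style message list; on a rate limit
    # it now returns the "doctor is busy" string instead of raising.
    history = [
        {"role": "system", "content": "You are a helpful dermatology assistant."},
        {"role": "user", "content": "What should I do about mild acne?"},
    ]
    print(getMessage(history))
else:
    print("API key check failed; see the printed exception for details.")

The 30-second throttle in cekApikey matters because each check issues a real ChatCompletion request; within the window the function returns True without calling the API.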