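"""Quick status checker for an OpenAI API key.

Sends a minimal 1-token chat completion request with the highest GPT model the
key can access, then reads the rate-limit response headers to report the
organization, RPM/TPM limits, and whether the key looks like a trial or
pay-as-you-go key.
"""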
import requests
import os
import openai
baseUrl = 'https://api.openai.com/v1'
GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
rate_limit_per_model = {
    "gpt-3.5-turbo": 2000,  # new pay-as-you-go turbo keys get 2000 RPM for the first 48 hours, then 3500
    "gpt-4": 200,
    "gpt-4-32k": 1000
}
# Minimal 1-token request bodies; they exist only to elicit rate-limit headers from the API.
body_gpt = {
    "gpt-3.5-turbo": {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role': 'user', 'content': ''}]},
    "gpt-4": {"model": "gpt-4", "max_tokens": 1, "messages": [{'role': 'user', 'content': ''}]},
    "gpt-4-32k": {"model": "gpt-4-32k", "max_tokens": 1, "messages": [{'role': 'user', 'content': ''}]}
}
def get_headers(key):
    headers = {'Authorization': f'Bearer {key}'}
    return headers
def get_subscription(key):
    headers = get_headers(key)
    if check_key_availability():
        rpm = "0"
        tpm = "0"
        org = ""
        quota = ""
        has_gpt4_32k = False
        has_gpt4 = False
        available_models = [model["root"] for model in openai.Model.list()["data"]]
        key_highest_model = ""
        if check_gpt4_32k_availability(available_models):
            key_highest_model = GPT_TYPES[2]
            has_gpt4_32k = True
            has_gpt4 = True
        elif check_gpt4_availability(available_models):
            key_highest_model = GPT_TYPES[1]
            has_gpt4 = True
        else:
            key_highest_model = GPT_TYPES[0]
        # Probe the chat completions endpoint with a 1-token request to read the rate-limit headers.
        r = requests.post(f"{baseUrl}/chat/completions", headers=headers, json=body_gpt[key_highest_model])
        result = r.json()
        if "id" in result:
            rpm = r.headers.get("x-ratelimit-limit-requests", "0")
            tpm = r.headers.get("x-ratelimit-limit-tokens", "0")
            org = r.headers.get('openai-organization', "")
            quota = check_key_type(key_highest_model, int(rpm))
        else:
            e = result.get("error", {}).get("code", "")
            quota = f"Error: {e}"
            org = get_org_name(key)
        return {"has_gpt4_32k": has_gpt4_32k,
                "has_gpt4": has_gpt4,
                "organization": org,
                "rpm": f"{rpm} ({key_highest_model})",
                "tpm": f"{tpm}",
                "quota": quota}
    else:
        # The key is invalid or revoked, so no model information is available.
        return {"has_gpt4_32k": False,
                "has_gpt4": False,
                "organization": "",
                "rpm": "",
                "tpm": "",
                "quota": ""}
def get_org_name(key):
    headers = get_headers(key)
    # Read the organization from the response headers of an authenticated request.
    r = requests.post(f"{baseUrl}/images/generations", headers=headers)
    return r.headers.get('openai-organization', "")
def check_key_type(model, rpm):
    # Pay keys report a higher requests-per-minute ceiling than trial keys for the same model.
    if rpm >= rate_limit_per_model[model]:
        return "yes | pay"
    else:
        return "yes | trial"
def check_gpt4_availability(available_models):
    return 'gpt-4' in available_models

def check_gpt4_32k_availability(available_models):
    return 'gpt-4-32k' in available_models
def check_key_availability():
    try:
        openai.Model.list()
        return True
    except Exception:
        return False
if __name__ == "__main__":
    key = os.getenv("OPENAI_API_KEY")
    openai.api_key = key  # make the key explicit for openai.Model.list(), rather than relying on the env default
    results = get_subscription(key)
    for k, v in results.items():
        print(f"{k}: {v}")