|
import requests |
|
import os |
|
from datetime import datetime |
|
from dateutil.relativedelta import relativedelta |
|
import openai |
|
|
|
# Endpoint used for the probe request (a one-token chat completion).
queryUrl = 'https://api.openai.com/v1/chat/completions'

# Models probed, ordered from lowest to highest tier.
GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]

# Known requests-per-minute caps per account tier / model; used by
# check_key_type() to infer whether a key is trial or paid.
rate_limit_per_model = {
    "gpt-3.5-turbo-trial": 2000,
    "gpt-3.5-turbo-pay": 3500,
    "gpt-4": 200,
    "gpt-4-32k": 1000
}

# Minimal (max_tokens=1) request bodies for each probed model.
body_gpt = {
    "gpt-3.5-turbo": {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role': 'user', 'content': ''}]},
    "gpt-4": {"model": "gpt-4", "max_tokens": 1, "messages": [{'role': 'user', 'content': ''}]},
    # BUG FIX: this entry previously requested "gpt-4", so the gpt-4-32k
    # probe never actually exercised the 32k model.
    "gpt-4-32k": {"model": "gpt-4-32k", "max_tokens": 1, "messages": [{'role': 'user', 'content': ''}]}
}
|
|
|
|
|
def get_headers(key):
    """Build the HTTP auth headers for a raw OpenAI API request.

    Args:
        key: The OpenAI API key to place in the Bearer token.

    Returns:
        A dict with a single ``Authorization`` entry.
    """
    return {'Authorization': f'Bearer {key}'}
|
|
|
def get_subscription(key):
    """Probe an API key and report its organization, rate limit and tier.

    Args:
        key: OpenAI API key to inspect.

    Returns:
        A dict with keys ``organization``, ``rpm`` and ``quota``; all values
        are empty strings when the key cannot list models at all.
    """
    headers = get_headers(key)
    # FIX: propagate the key to the openai client so that the availability
    # checks below query the same account as the direct HTTP request
    # (previously they silently used whatever key openai had configured).
    openai.api_key = key

    # Pick the highest-tier model the key can see so the probe request
    # reflects the key's best capability.
    if check_gpt4_32k_availability():
        key_highest_model = GPT_TYPES[2]
    elif check_gpt4_availability():
        key_highest_model = GPT_TYPES[1]
    else:
        key_highest_model = GPT_TYPES[0]

    if not check_key_availability():
        return {"organization": "", "rpm": "", "quota": ""}

    rpm = ""
    org = ""
    quota = ""
    # FIX: added a timeout so a stalled API call cannot hang forever.
    r = requests.post(queryUrl, headers=headers,
                      json=body_gpt[key_highest_model], timeout=30)
    result = r.json()
    if "id" in result:
        # FIX: use .get() — these response headers are not guaranteed to be
        # present, and a missing one previously raised KeyError.
        rpm = r.headers.get('x-ratelimit-limit-requests', '')
        org = r.headers.get('openai-organization', '')
        quota = check_key_type(key_highest_model, int(rpm)) if rpm else ""
    else:
        e = result["error"]["code"]
        quota = f"Error: {e}"

    return {"organization": org,
            "rpm": rpm,
            "quota": quota}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def check_key_type(model, rpm):
    """Classify a key as trial / pay / "big" from its requests-per-minute cap.

    Args:
        model: The highest model the key can access (one of GPT_TYPES).
        rpm: The key's requests-per-minute limit as reported by the API.

    Returns:
        A human-readable "yes | ..." tier string.
    """
    if model == GPT_TYPES[0]:
        # gpt-3.5-turbo has distinct trial and pay caps.
        trial_cap = rate_limit_per_model['gpt-3.5-turbo-trial']
        pay_cap = rate_limit_per_model['gpt-3.5-turbo-pay']
        if rpm > pay_cap:
            return "yes | pay, possibly big key"
        if rpm > trial_cap:
            return "yes | pay"
        return "yes | trial"

    # gpt-4 family: compare against the model's single known pay cap.
    cap = rate_limit_per_model[model]
    if rpm < cap:
        return "yes | trial"
    if rpm == cap:
        return "yes | pay"
    return "yes | pay, possibly big key"
|
|
|
def check_gpt4_availability():
    """Return True when the configured key can access the gpt-4 model."""
    if not check_key_availability():
        return False
    roots = (entry["root"] for entry in openai.Model.list()["data"])
    return 'gpt-4' in roots
|
|
|
def check_gpt4_32k_availability():
    """Return True when the configured key can access the gpt-4-32k model."""
    if not check_key_availability():
        return False
    roots = (entry["root"] for entry in openai.Model.list()["data"])
    return 'gpt-4-32k' in roots
|
|
|
def check_key_availability():
    """Return True when the configured openai key can list models at all.

    Any API failure (bad auth, network error, rate limit) is treated as
    "key unavailable" rather than propagated.
    """
    try:
        openai.Model.list()
    # FIX: narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt and SystemExit.
    except Exception:
        return False
    return True
|
|
|
if __name__ == "__main__":
    key = os.getenv("OPENAI_API_KEY")
    # FIX: fail fast with a clear message instead of silently probing with
    # key=None when the environment variable is missing.
    if not key:
        raise SystemExit("OPENAI_API_KEY environment variable is not set")

    results = get_subscription(key)
    for field, value in results.items():
        print(f"{field}: {value}")