import requests
import os
import openai

BASE_URL = 'https://api.openai.com/v1'
GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
RATE_LIMIT_PER_MODEL = {
    "gpt-3.5-turbo": 2000, # new pay turbo will have 2000 RPM for the first 48 hours then become 3500
    "gpt-4": 200,
    "gpt-4-32k": 1000
}
BODY_GPT = {
    "gpt-3.5-turbo": {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]},
    "gpt-4": {"model": "gpt-4", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]},
    "gpt-4-32k": {"model": "gpt-4-32k", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
}

def get_headers(key):
    headers = {'Authorization': f'Bearer {key}'}
    return headers

def get_subscription(key, available_models):
    """Probe the key's highest available model with a 1-token request and read its rate limits."""
    headers = get_headers(key)
    rpm = "0"
    tpm = "0"
    org = ""
    quota = ""
    key_highest_model = ""
    has_gpt4_32k = False
    has_gpt4 = False
        
    if check_gpt4_32k_availability(available_models):
        key_highest_model = GPT_TYPES[2]
        has_gpt4_32k = True
        has_gpt4 = True
    elif check_gpt4_availability(available_models):
        key_highest_model = GPT_TYPES[1]
        has_gpt4 = True
    else:
        key_highest_model = GPT_TYPES[0]  

    req_body = {"model": key_highest_model, "messages": [{'role':'user', 'content': ''}], "max_tokens": 1}
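    # The probe requests only 1 token, so it is cheap; the rate-limit values ride back on the response headers.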
    r = requests.post(f"{BASE_URL}/chat/completions", headers=headers, json=req_body)
    result = r.json()
        
    if "id" in result:            
        rpm = r.headers.get("x-ratelimit-limit-requests", "0")
        tpm = r.headers.get("x-ratelimit-limit-tokens", "0")
        org = r.headers.get('openai-organization', "")
        quota = check_key_type(key_highest_model, int(rpm))      
    else:
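        # Failed probe: surface the API error code and recover the org name via a separate endpoint.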
        e = result.get("error", {}).get("code", "")
        quota = f"Error: {e}"
        org = get_org_name(key)
            
    return {"has_gpt4_32k": has_gpt4_32k,
            "has_gpt4": has_gpt4,
            "organization": org,
            "rpm": f"{rpm} ({key_highest_model})",
            "tpm": f"{tpm}",
            "quota": quota}

def get_org_name(key):
    # Even a failed request to this endpoint returns the organization name in its response headers.
    headers = get_headers(key)
    r = requests.post(f"{BASE_URL}/images/generations", headers=headers)
    return r.headers.get('openai-organization', "")

def check_key_type(model, rpm):
    # Pay-as-you-go keys report an RPM at or above the per-model limit; trial keys sit below it.
    if rpm >= RATE_LIMIT_PER_MODEL[model]:
        return "yes | pay"
    else:
        return "yes | trial"
    
def check_gpt4_availability(available_models):
    return 'gpt-4' in available_models

def check_gpt4_32k_availability(available_models):
    return 'gpt-4-32k' in available_models
        
def check_key_availability():
    # Requires openai.api_key to be set; returns only the GPT chat models this key can access.
    try:
        avai_models = openai.Model.list()
        return [model["root"] for model in avai_models["data"] if model["root"] in GPT_TYPES]
    except Exception:
        return False

if __name__ == "__main__":
    key = os.getenv("OPENAI_API_KEY")
    openai.api_key = key  # check_key_availability() relies on the global client key
    available_models = check_key_availability()
    results = get_subscription(key, available_models) if available_models else None
    print(results)