superdup95 committed
Commit 5f3a318
Parent: 55f5592

Update api_usage.py

Files changed (1)
  1. api_usage.py +45 -59
api_usage.py CHANGED
@@ -1,17 +1,15 @@
 import requests
 import os
-from datetime import datetime
-from dateutil.relativedelta import relativedelta
 import openai
 
-baseUrl = 'https://api.openai.com/v1'
+BASE_URL = 'https://api.openai.com/v1'
 GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
-rate_limit_per_model = {
+RATE_LIMIT_PER_MODEL = {
     "gpt-3.5-turbo": 2000, # new pay turbo will have 2000 RPM for the first 48 hours then become 3500
     "gpt-4": 200,
     "gpt-4-32k": 1000
 }
-body_gpt = {
+BODY_GPT = {
     "gpt-3.5-turbo": {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]},
     "gpt-4": {"model": "gpt-4", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]},
     "gpt-4-32k": {"model": "gpt-4-32k", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
@@ -21,63 +19,54 @@ def get_headers(key):
     headers = {'Authorization': f'Bearer {key}'}
     return headers
 
-def get_subscription(key):
-    headers = get_headers(key)
-
-    if check_key_availability():
-        rpm = "0"
-        tpm = "0"
-        org = ""
-        quota = ""
-        has_gpt4_32k = False
-        has_gpt4 = False
-        available_models = [model["root"] for model in openai.Model.list()["data"]]
-        key_highest_model = ""
+def get_subscription(key, available_models):
+    headers = get_headers(key)
+    rpm = "0"
+    tpm = "0"
+    org = ""
+    quota = ""
+    key_highest_model = ""
+    has_gpt4_32k = False
+    has_gpt4 = False
 
-        if check_gpt4_32k_availability(available_models):
-            key_highest_model = GPT_TYPES[2]
-            has_gpt4_32k = True
-            has_gpt4 = True
-        elif check_gpt4_availability(available_models):
-            key_highest_model = GPT_TYPES[1]
-            has_gpt4 = True
-        else:
-            key_highest_model = GPT_TYPES[0]
-
-        r = requests.post(f"{baseUrl}/chat/completions", headers=headers, json=body_gpt[key_highest_model])
-        result = r.json()
+    if check_gpt4_32k_availability(available_models):
+        key_highest_model = GPT_TYPES[2]
+        has_gpt4_32k = True
+        has_gpt4 = True
+    elif check_gpt4_availability(available_models):
+        key_highest_model = GPT_TYPES[1]
+        has_gpt4 = True
+    else:
+        key_highest_model = GPT_TYPES[0]
+
+    req_body = {"model": key_highest_model, "messages": [{'role':'user', 'content': ''}], "max_tokens": 1}
+    r = requests.post(f"{BASE_URL}/chat/completions", headers=headers, json=req_body)
+    result = r.json()
 
-        if "id" in result:
-            rpm = r.headers.get("x-ratelimit-limit-requests", "0")
-            tpm = r.headers.get("x-ratelimit-limit-tokens", "0")
-            org = r.headers.get('openai-organization', "")
-            quota = check_key_type(key_highest_model, int(rpm))
-        else:
-            e = result.get("error", {}).get("code", "")
-            quota = f"Error: {e}"
-            org = get_org_name(key)
-
-        return {"has_gpt4_32k": has_gpt4_32k,
-                "has_gpt4": has_gpt4,
-                "organization": org,
-                "rpm": f"{rpm} ({key_highest_model})",
-                "tpm": f"{tpm}",
-                "quota": quota}
+    if "id" in result:
+        rpm = r.headers.get("x-ratelimit-limit-requests", "0")
+        tpm = r.headers.get("x-ratelimit-limit-tokens", "0")
+        org = r.headers.get('openai-organization', "")
+        quota = check_key_type(key_highest_model, int(rpm))
     else:
-        return {"has_gpt4_32k": has_gpt4_32k,
-                "has_gpt4": has_gpt4,
-                "organization": "",
-                "rpm": "",
-                "tpm": "",
-                "quota": ""}
+        e = result.get("error", {}).get("code", "")
+        quota = f"Error: {e}"
+        org = get_org_name(key)
+
+    return {"has_gpt4_32k": has_gpt4_32k,
+            "has_gpt4": has_gpt4,
+            "organization": org,
+            "rpm": f"{rpm} ({key_highest_model})",
+            "tpm": f"{tpm}",
+            "quota": quota}
 
 def get_org_name(key):
     headers=get_headers(key)
-    r = requests.post(f"{baseUrl}/images/generations", headers=headers)
+    r = requests.post(f"{BASE_URL}/images/generations", headers=headers)
     return r.headers['openai-organization']
 
 def check_key_type(model, rpm):
-    if rpm >= rate_limit_per_model[model]:
+    if rpm >= RATE_LIMIT_PER_MODEL[model]:
         return "yes | pay"
     else:
         return "yes | trial"
@@ -96,14 +85,11 @@ def check_gpt4_32k_availability(available_models):
 
 def check_key_availability():
     try:
-        openai.Model.list()
-        return True
+        avai_models = openai.Model.list()
+        return [model["root"] for model in avai_models["data"] if model["root"] in GPT_TYPES]
     except:
         return False
 
 if __name__ == "__main__":
     key = os.getenv("OPENAI_API_KEY")
-
-    results = get_subscription(key)
-    for k, v in results.items():
-        print(f"{k}: {v}")
+    results = get_subscription(key)
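
For context, a minimal sketch of how the refactored helpers compose after this change: check_key_availability() now returns the list of GPT model roots visible to the key (or False when the key is rejected), and get_subscription() expects that list as its second argument. The caller below is illustrative only; the openai.api_key assignment is an assumption, since api_usage.py itself never configures the openai client, and the printing loop simply mirrors what the old __main__ block did.

# Sketch: wiring the refactored helpers together (not part of this commit).
# Assumes openai.api_key is set by the caller; api_usage.py does not set it.
import os
import openai
from api_usage import check_key_availability, get_subscription

key = os.getenv("OPENAI_API_KEY")
openai.api_key = key

available_models = check_key_availability()   # list of GPT model roots, or False
if available_models is not False:             # note: an empty list is still a usable key
    results = get_subscription(key, available_models)
    for k, v in results.items():              # same printout the old __main__ block produced
        print(f"{k}: {v}")
else:
    print("check_key_availability() failed; the key is likely invalid or revoked")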