superdup95 committed
Commit c872e63
1 Parent(s): f6729bd

Update api_usage.py

Files changed (1)
  1. api_usage.py +11 -10
api_usage.py CHANGED
@@ -11,7 +11,7 @@ import asyncio
 import aiohttp
 
 BASE_URL = 'https://api.openai.com/v1'
-GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314"]
+GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4o", "gpt-4-turbo"]
 
 TOKEN_LIMIT_PER_TIER_TURBO = {
     "free": 40000,
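For reference, the later hunks in this commit select models from GPT_TYPES by position. A minimal sketch of the index mapping after this change (the comments are annotations, not part of the commit):

GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4o", "gpt-4-turbo"]
# GPT_TYPES[0] -> "gpt-3.5-turbo"
# GPT_TYPES[1] -> "gpt-4"
# GPT_TYPES[2] -> "gpt-4-32k"
# GPT_TYPES[3] -> "gpt-4-32k-0314"
# GPT_TYPES[4] -> "gpt-4o"       (added in this commit)
# GPT_TYPES[5] -> "gpt-4-turbo"  (added in this commit)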
@@ -73,28 +73,28 @@ def get_subscription(key, session, org_list):
                 org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
             if has_gpt4_32k:
                 list_models_avai.update(GPT_TYPES)
-                status_formated = format_status([GPT_TYPES[2], GPT_TYPES[1], GPT_TYPES[0]], session, headers)
+                status_formated = format_status([GPT_TYPES[2], GPT_TYPES[4], GPT_TYPES[5], GPT_TYPES[1], GPT_TYPES[0]], session, headers)
                 rpm.append(status_formated[0])
                 tpm.append(status_formated[1])
                 quota.append(status_formated[2])
-                list_models.append(f"gpt-4-32k, gpt-4, gpt-3.5-turbo ({len(available_models)} total)")
+                list_models.append(f"gpt-4-32k, gpt-4o, gpt-4-turbo, gpt-4, gpt-3.5-turbo ({len(available_models)} total)")
             else:
                 list_models_avai.update([GPT_TYPES[3], GPT_TYPES[1], GPT_TYPES[0]])
-                status_formated = format_status([GPT_TYPES[3], GPT_TYPES[1], GPT_TYPES[0]], session, headers)
+                status_formated = format_status([GPT_TYPES[3], GPT_TYPES[4], GPT_TYPES[5], GPT_TYPES[1], GPT_TYPES[0]], session, headers)
                 rpm.append(status_formated[0])
                 tpm.append(status_formated[1])
                 quota.append(status_formated[2])
-                list_models.append(f"gpt-4-32k-0314, gpt-4, gpt-3.5-turbo ({len(available_models)} total)")
+                list_models.append(f"gpt-4-32k-0314, gpt-4o, gpt-4-turbo, gpt-4, gpt-3.5-turbo ({len(available_models)} total)")
 
         elif has_gpt4:
             if org_in['id']:
                 org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
             list_models_avai.update([GPT_TYPES[1], GPT_TYPES[0]])
-            status_formated = format_status([GPT_TYPES[1], GPT_TYPES[0]], session, headers)
+            status_formated = format_status([GPT_TYPES[4], GPT_TYPES[5], GPT_TYPES[1], GPT_TYPES[0]], session, headers)
             rpm.append(status_formated[0])
             tpm.append(status_formated[1])
             quota.append(status_formated[2])
-            list_models.append(f"gpt-4, gpt-3.5-turbo ({len(available_models)} total)")
+            list_models.append(f"gpt-4o, gpt-4-turbo, gpt-4, gpt-3.5-turbo ({len(available_models)} total)")
 
         else:
             if org_in['id']:
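Resolving those indices against the updated GPT_TYPES, the model lists now passed to format_status() in each branch are as follows; the variable names below are only illustrative and do not appear in api_usage.py:

# has_gpt4_32k branch
models_32k      = ["gpt-4-32k", "gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo"]
# gpt-4-32k-0314 branch
models_32k_0314 = ["gpt-4-32k-0314", "gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo"]
# has_gpt4 branch
models_gpt4     = ["gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo"]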
@@ -137,7 +137,8 @@ def send_oai_completions(oai_stuff):
     _tpm = '{:,}'.format(tpm_num).replace(',', ' ')
     _tpm_left = '{:,}'.format(tpm_left).replace(',', ' ')
     rpm_string = f"{_rpm} ({model})"
-    tpm_string = f"{_tpm} ({_tpm_left} left, {model})"
+    #tpm_string = f"{_tpm} ({_tpm_left} left, {model})"
+    tpm_string = f"{_tpm} ({model})"
     dictCount = 0
     dictLength = len(TOKEN_LIMIT_PER_TIER_GPT4)
 
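The effect of this hunk is that the "tokens left" portion is dropped from the TPM display string. A small runnable sketch with made-up values (not taken from the commit):

tpm_num, tpm_left, model = 80000, 12345, "gpt-4"         # illustrative values only
_tpm = '{:,}'.format(tpm_num).replace(',', ' ')           # "80 000"
_tpm_left = '{:,}'.format(tpm_left).replace(',', ' ')     # "12 345"
old_tpm_string = f"{_tpm} ({_tpm_left} left, {model})"    # "80 000 (12 345 left, gpt-4)"
new_tpm_string = f"{_tpm} ({model})"                      # "80 000 (gpt-4)"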
@@ -178,8 +179,8 @@ def format_status(list_models_avai, session, headers):
     rpm_str = ""
     tpm_str = ""
     for i in range(len(rpm)):
-        rpm_str += rpm[i] + (", " if i < len(rpm)-1 else "")
-        tpm_str += tpm[i] + (", " if i < len(rpm)-1 else "")
+        rpm_str += rpm[i] + (" | " if i < len(rpm)-1 else "")
+        tpm_str += tpm[i] + (" | " if i < len(rpm)-1 else "")
     return rpm_str, tpm_str, quota
 
 def check_key_tier(rpm, tpm, dict, headers):
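This hunk only changes the separator that format_status() places between the per-model RPM/TPM entries, from ", " to " | ". For rpm_str the loop behaves like a str.join with that separator; a short sketch with illustrative entries (not from the commit):

rpm = ["10 000 (gpt-4o)", "500 (gpt-4)", "3 500 (gpt-3.5-turbo)"]  # example entries only
old_rpm_str = ", ".join(rpm)    # "10 000 (gpt-4o), 500 (gpt-4), 3 500 (gpt-3.5-turbo)"
new_rpm_str = " | ".join(rpm)   # "10 000 (gpt-4o) | 500 (gpt-4) | 3 500 (gpt-3.5-turbo)"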
 