superdup95 committed on
Commit 7111c0a
1 Parent(s): b029469

Update api_usage.py

Files changed (1)
  1. api_usage.py +48 -49
api_usage.py CHANGED
@@ -7,8 +7,9 @@ from dateutil.relativedelta import relativedelta
 import boto3
 import botocore.exceptions
 import concurrent.futures
-import asyncio
+import asyncio, aiohttp
 import aiohttp
+from awsLib import bedrock_model_available,bedrock_send_fake_form
 
 BASE_URL = 'https://api.openai.com/v1'
 GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4o", "gpt-4-turbo"]
@@ -20,13 +21,15 @@ TOKEN_LIMIT_PER_TIER_TURBO = {
     "tier-2": 80000,
     "tier-3": 160000,
     "tier-4": 1000000,
-    "tier-5": 2000000
+    "tier-5-old": 2000000,
+    "tier-5": 5000000
 }
 TOKEN_LIMIT_PER_TIER_GPT4 = {
     "tier-1": 10000,
     "tier-2": 40000,
     "tier-3": 80000,
-    "tier-4-5": 300000
+    "tier-4": 300000,
+    "tier-5": 1000000
 } # according to: https://platform.openai.com/docs/guides/rate-limits/usage-tiers
 
 RPM_LIMIT_PER_BUILD_TIER_ANT = {
@@ -340,9 +343,9 @@ def check_key_gemini_availability(key):
 def check_key_azure_availability(endpoint, api_key):
     try:
         if endpoint.startswith('http'):
-            url = f'{endpoint}/openai/models?api-version=2023-03-15-preview'
+            url = f'{endpoint}/openai/models?api-version=2022-12-01'
         else:
-            url = f'https://{endpoint}/openai/models?api-version=2023-03-15-preview'
+            url = f'https://{endpoint}/openai/models?api-version=2022-12-01'
 
         headers = {
             'User-Agent': 'OpenAI/v1 PythonBindings/0.28.0',
@@ -359,9 +362,9 @@ def check_key_azure_availability(endpoint, api_key):
 def get_azure_deploy(endpoint, api_key):
     try:
         if endpoint.startswith('http'):
-            url = f'{endpoint}/openai/deployments?api-version=2023-03-15-preview'
+            url = f'{endpoint}/openai/deployments?api-version=2022-12-01'
         else:
-            url = f'https://{endpoint}/openai/deployments?api-version=2023-03-15-preview'
+            url = f'https://{endpoint}/openai/deployments?api-version=2022-12-01'
 
         headers = {
             'User-Agent': 'OpenAI/v1 PythonBindings/0.28.0',
@@ -379,9 +382,9 @@ def get_azure_deploy(endpoint, api_key):
 def check_gpt4turbo(endpoint, api_key, deploy_id):
     try:
         if endpoint.startswith('http'):
-            url = f'{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2023-03-15-preview'
+            url = f'{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2024-02-01'
         else:
-            url = f'https://{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2023-03-15-preview'
+            url = f'https://{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2024-02-01'
 
         headers = {
             'Content-Type': 'application/json',
@@ -442,9 +445,9 @@ def get_azure_status(endpoint, api_key, deployments_list):
 
     for model, deployment in list_model.items():
         if endpoint.startswith('http'):
-            url = f'{endpoint}/openai/deployments/{deployment}/chat/completions?api-version=2023-03-15-preview'
+            url = f'{endpoint}/openai/deployments/{deployment}/chat/completions?api-version=2024-02-01'
         else:
-            url = f'https://{endpoint}/openai/deployments/{deployment}/chat/completions?api-version=2023-03-15-preview'
+            url = f'https://{endpoint}/openai/deployments/{deployment}/chat/completions?api-version=2024-02-01'
 
         headers = {
             'Content-Type': 'application/json',
@@ -525,7 +528,7 @@ def check_key_replicate_availability(key):
     except:
         return "Unknown", "", "", "Error while making request"
 
-def check_key_aws_availability(key):
+async def check_key_aws_availability(key):
     access_id = key.split(':')[0]
     access_secret = key.split(':')[1]
 
@@ -569,7 +572,7 @@ def check_key_aws_availability(key):
         if policy['PolicyName'] == 'AmazonBedrockFullAccess':
             aws_bedrock_full_access = True
 
-    enable_region = check_bedrock_invoke(session)
+    enable_region = await check_bedrock_claude_status(access_id, access_secret)
     cost = check_aws_billing(session)
 
     return True, username[0], root, admin, quarantine, iam_full_access, iam_user_change_password, aws_bedrock_full_access, enable_region, cost
@@ -592,51 +595,47 @@ def check_policy(iam, username):
     except botocore.exceptions.ClientError as error:
         return False, error.response['Error']['Code']
 
-def invoke_claude(session, region, modelId):
+def is_model_working(form_info, model_info):
     try:
-        bedrock_runtime = session.client("bedrock-runtime", region_name=region)
-        body = json.dumps({
-            "prompt": "\n\nHuman:\n\nAssistant:",
-            "max_tokens_to_sample": 0
-        })
-        response = bedrock_runtime.invoke_model(body=body, modelId=modelId)
-    except bedrock_runtime.exceptions.ValidationException as error:
-        #print(error.response['Error'])
-        if 'max_tokens_to_sample' in error.response['Error']['Message']:
-            return region
-    except bedrock_runtime.exceptions.AccessDeniedException as error:
-        #print(error.response['Error'])
-        return
-    except bedrock_runtime.exceptions.ResourceNotFoundException as error:
-        #print(error.response['Error'])
-        return
-    except Exception as e:
-        #print(e)
-        return
-
-def invoke_and_collect(session, model_name, region):
-    result = invoke_claude(session, region, f"anthropic.{model_name}")
-    if result:
-        return model_name, result
+        form_status = form_info['message']
+        agreement_status = model_info['agreementAvailability']['status']
+        auth_status = model_info['authorizationStatus']
+        entitlementAvai = model_info['entitlementAvailability']
+
+        if 'formData' in form_status and agreement_status == 'AVAILABLE' and auth_status == 'AUTHORIZED' and entitlementAvai == 'AVAILABLE':
+            return True
+        return False
+    except:
+        #print(form_status)
+        return False
+
+async def get_model_status(session, key, secret, region, model_name, form_info):
+    model_info = await bedrock_model_available(session, key, secret, region, f"anthropic.{model_name}")
+    model_status = is_model_working(form_info, model_info)
+    if model_status:
+        return region, model_name
+    else:
+        return None, None
+
+async def check_bedrock_claude_status(key, secret):
+    regions = ['us-east-1', 'us-west-2', 'eu-central-1', 'eu-west-3', 'ap-northeast-1', 'ap-southeast-2'] # currently these regions aren't "gated" nor having only "low context" models
 
-def check_bedrock_invoke(session):
-    regions = ['us-east-1', 'us-west-2', 'eu-central-1', 'eu-west-3', 'ap-southeast-1', 'ap-northeast-1']
     models = {
         "claude-v2": [],
         "claude-3-haiku-20240307-v1:0": [],
         "claude-3-sonnet-20240229-v1:0": [],
-        "claude-3-opus-20240229-v1:0": []
+        "claude-3-opus-20240229-v1:0": [],
+        "claude-3-5-sonnet-20240620-v1:0": []
     }
-
-    with concurrent.futures.ThreadPoolExecutor() as executor:
-        futures = []
+    async with aiohttp.ClientSession() as session:
+        tasks = []
+        form_info = await bedrock_send_fake_form(session, key, secret, "us-east-1", "")
         for region in regions:
            for model in models:
-                futures.append(executor.submit(invoke_and_collect, session, model, region))
-
-        for future in concurrent.futures.as_completed(futures):
-            if future.result():
-                model_name, region = future.result()
+                tasks.append(get_model_status(session, key, secret, region, model, form_info))
+        results = await asyncio.gather(*tasks)
+        for region, model_name in results:
+            if region and model_name:
                 models[model_name].append(region)
 
     return models
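
Note: with this change check_key_aws_availability is a coroutine, so a synchronous caller now has to drive it through an event loop. A minimal sketch, assuming the file is imported as a module named api_usage and that the key string uses the access_id:access_secret format the function splits on (the value below is a placeholder, not a real credential):

import asyncio
import api_usage  # module name assumed from the filename api_usage.py

key = "EXAMPLE_ACCESS_ID:EXAMPLE_ACCESS_SECRET"  # placeholder "access_id:access_secret" pair
# check_key_aws_availability is now async and must be awaited (or run via asyncio.run)
result = asyncio.run(api_usage.check_key_aws_availability(key))
print(result)

On success the coroutine still returns the same ten-value tuple as before (ending with enable_region and cost), so existing callers only need the added await or asyncio.run, not a new unpacking.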
 
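For reference, enable_region now comes from check_bedrock_claude_status instead of check_bedrock_invoke. Per the new code above, it is a dict keyed by Claude model id, where each value is the list of regions in which the fake-form, agreement, authorization, and entitlement checks all passed. A sketch of the shape only (the regions shown are illustrative, not guaranteed for any given key):

# Shape only; which regions actually appear depends on the key being checked.
enable_region = {
    "claude-v2": ["us-east-1"],
    "claude-3-haiku-20240307-v1:0": ["us-east-1", "us-west-2"],
    "claude-3-sonnet-20240229-v1:0": [],
    "claude-3-opus-20240229-v1:0": ["us-west-2"],
    "claude-3-5-sonnet-20240620-v1:0": []
}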