ArvindSelvaraj committed
Commit 7438661
1 Parent(s): 1454c9c

Update backend.py

Files changed (1): backend.py +3 -3
backend.py CHANGED
@@ -6,10 +6,10 @@ import html # For escaping HTML characters
 from bs4 import BeautifulSoup
 from openai import OpenAI
 
-# Initialize OpenAI API with Meta's Llama 3.1 405B model
+# Initialize OpenAI API with Nvidia's Llama 3.1 70b nemotron model
 client = OpenAI(
     base_url="https://integrate.api.nvidia.com/v1",
-    api_key="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"
+    api_key="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"
 )
 
 def clean_test_case_output(text):
@@ -46,7 +46,7 @@ def generate_testcases(user_story):
     try:
         # Call the Nvidia llama API with the refined prompt
         completion = client.chat.completions.create(
-            model="meta/llama-3.1-405b-instruct", # Using llama 3.1 450b
+            model="nvidia/llama-3.1-nemotron-70b-instruct", # Using llama 3.1 70b
             messages=[
                 {"role": "user", "content": prompt}
             ],
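
For reference, a minimal sketch of how the configuration introduced by this commit is used end to end. It assumes the NVIDIA key is supplied through an environment variable (the NVIDIA_API_KEY name is a placeholder, not part of backend.py, which hard-codes the key) and uses a stand-in prompt in place of the one generate_testcases builds from a user story:

import os

from openai import OpenAI

# Client setup matching the new base_url and model from this commit;
# the key is read from the environment here rather than hard-coded.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ["NVIDIA_API_KEY"],  # placeholder variable name
)

# Stand-in prompt; backend.py builds its prompt from a user story.
completion = client.chat.completions.create(
    model="nvidia/llama-3.1-nemotron-70b-instruct",
    messages=[{"role": "user", "content": "Write test cases for a login form."}],
)

print(completion.choices[0].message.content)

Reading the key from the environment keeps credentials out of version control and out of diffs like the one above.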