ArvindSelvaraj committed
Commit: 2b8f81c
Parent: ac972ad

Update backend.py

Files changed (1):
  1. backend.py +4 -4
backend.py CHANGED
@@ -6,7 +6,7 @@ import html # For escaping HTML characters
 from bs4 import BeautifulSoup
 from openai import OpenAI
 
-# Initialize OpenAI API with Nvidia's Mistral model
+# Initialize OpenAI API with Nvidia's llama model
 client = OpenAI(
     base_url="https://integrate.api.nvidia.com/v1",
     api_key="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"
@@ -24,7 +24,7 @@ def clean_test_case_output(text):
 def generate_testcases(user_story):
     """
     Generates advanced QA test cases based on a provided user story by interacting
-    with Nvidia's Mistral model API. The prompt is refined for clarity,
+    with Nvidia's llama model API. The prompt is refined for clarity,
     and the output is processed for better quality.
 
     :param user_story: A string representing the user story for which to generate test cases.
@@ -84,9 +84,9 @@ def generate_testcases(user_story):
     prompt = few_shot_examples + f"\nUser Story: {user_story}\n"
 
     try:
-        # Call the Nvidia Mistral API with the refined prompt
+        # Call the Nvidia llama API with the refined prompt
         completion = client.chat.completions.create(
-            model="meta/llama-3.1-405b-instruct", # Using Mistral model
+            model="meta/llama-3.1-405b-instruct", # Using llama3.1 405b model
             messages=[
                 {"role": "user", "content": prompt}
             ],
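
For reference, the changed call follows the standard OpenAI-compatible chat-completions pattern that NVIDIA's hosted endpoint exposes. Below is a minimal sketch of the same flow, assuming the key is read from an environment variable rather than hardcoded as in the committed file, and assuming illustrative temperature/max_tokens values, since the hunk is truncated after messages:

    import os
    from openai import OpenAI

    # NVIDIA's OpenAI-compatible endpoint, same base_url as backend.py
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=os.environ["NVIDIA_API_KEY"],  # assumption: env var, not a hardcoded key
    )

    completion = client.chat.completions.create(
        model="meta/llama-3.1-405b-instruct",  # the model named in the diff
        messages=[
            {"role": "user", "content": "User Story: As a user, I want to reset my password."}
        ],
        temperature=0.2,  # assumption: not visible in the truncated hunk
        max_tokens=1024,  # assumption: not visible in the truncated hunk
    )
    print(completion.choices[0].message.content)

Reading the key from the environment avoids committing credentials to the repository, which the current backend.py does with its hardcoded nvapi- key.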
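
The hunk headers also reference clean_test_case_output(text), whose body is not part of this diff. Given the file's html and BeautifulSoup imports, a hypothetical sketch of such a cleanup helper might look like this (the actual implementation in backend.py is not shown here):

    import html
    from bs4 import BeautifulSoup

    def clean_test_case_output(text):
        # Hypothetical body: unescape HTML entities, then strip any markup
        # the model may have emitted, leaving plain text.
        text = html.unescape(text)
        return BeautifulSoup(text, "html.parser").get_text().strip()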