ArvindSelvaraj committed
Commit: 1454c9c
Parent(s): a8e8560

Update backend.py

backend.py CHANGED (+54 -40)
@@ -1,13 +1,16 @@
 import csv
 import io
+import requests
 import json
 import html  # For escaping HTML characters
 from bs4 import BeautifulSoup
-from
+from openai import OpenAI
 
-#
-
-
+# Initialize the OpenAI client with Meta's Llama 3.1 405B model, served via Nvidia's API
+client = OpenAI(
+    base_url="https://integrate.api.nvidia.com/v1",
+    api_key="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"
+)
 
 def clean_test_case_output(text):
     """
@@ -21,7 +24,7 @@ def clean_test_case_output(text):
 def generate_testcases(user_story):
     """
     Generates advanced QA test cases based on a provided user story by interacting
-    with
+    with Nvidia's Llama model API. The prompt is refined for clarity,
     and the output is processed for better quality.
 
     :param user_story: A string representing the user story for which to generate test cases.
@@ -30,49 +33,60 @@ def generate_testcases(user_story):
 
     # Few-shot learning examples to guide the model
     few_shot_examples = """
+
     "if it's not a DropBury or ODAC Portal User Story, then we perform testing in Tech360 iOS App"
-    "Generate as many
+    "Generate as many test cases as possible, minimum 6; the maximum can be anything"
     "Understand the story thoroughly"
     "If it's a DropBury or ODAC Portal User Story, then we perform testing in ODAC Portal"
     """
 
     # Combine the few-shot examples with the user story for the model to process
-    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"
-
-    # Tokenize the prompt
-    inputs = tokenizer(prompt, return_tensors="pt")
-
-    # Generate text with the model
-    outputs = model.generate(
-        **inputs,
-        max_length=4096,
-        temperature=0.03,
-        top_p=0.7,
-        do_sample=False
-    )
-
-    # Decode the generated text
-    test_cases_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Ensure the entire response is captured before cleaning
-    if test_cases_text.strip() == "":
-        return [{"test_case": "No test cases generated or output was empty."}]
-
-    # Clean the output by unescaping HTML entities and replacing <br> tags
-    test_cases_text = clean_test_case_output(test_cases_text)
+    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"
 
     try:
-        #
-
-
-
-
-
-
-
-
-
+        # Call the Nvidia Llama API with the refined prompt
+        completion = client.chat.completions.create(
+            model="meta/llama-3.1-405b-instruct",  # Using Llama 3.1 405B
+            messages=[
+                {"role": "user", "content": prompt}
+            ],
+            temperature=0.03,  # Low temperature for precise and deterministic output
+            top_p=0.7,  # Prioritize high-probability tokens even more
+            max_tokens=4096,  # Increase max tokens to allow longer content
+            stream=True  # Stream the response for faster retrieval
+        )
+
+        # Initialize an empty string to accumulate the response
+        test_cases_text = ""
+
+        # Accumulate the response from the streaming chunks
+        for chunk in completion:
+            if chunk.choices[0].delta.content is not None:
+                test_cases_text += chunk.choices[0].delta.content
+
+        # Ensure the entire response is captured before cleaning
+        if test_cases_text.strip() == "":
+            return [{"test_case": "No test cases generated or output was empty."}]
+
+        # Clean the output by unescaping HTML entities and replacing <br> tags
+        test_cases_text = clean_test_case_output(test_cases_text)
+
+        try:
+            # Try to parse the output as JSON, assuming the model returns structured test cases
+            test_cases = json.loads(test_cases_text)
+            if isinstance(test_cases, list):
+                return test_cases  # Return structured test cases
+
+            else:
+                return [{"test_case": test_cases_text}]  # Return as a list with the text wrapped in a dict
+
+        except json.JSONDecodeError:
+            # Fallback: return the raw text if JSON parsing fails
+            return [{"test_case": test_cases_text}]
+
+    except requests.exceptions.RequestException as e:
+        print(f"API request failed: {str(e)}")
+        return []
 
 # Export test cases in CSV format
 def export_test_cases(test_cases, format='csv'):
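Note that the commit hardcodes the Nvidia API key in source, where anyone with repository access can read it. A minimal sketch of the same client initialization reading the key from the environment instead; the variable name NVIDIA_API_KEY is an assumption for illustration, not part of the commit:

import os
from openai import OpenAI

# Read the key from the environment rather than committing it to source.
# NVIDIA_API_KEY is a hypothetical variable name, not taken from this commit.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ["NVIDIA_API_KEY"]
)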
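One caveat in the new error handling: requests is imported only to catch requests.exceptions.RequestException, but the openai v1 client is built on httpx and raises its own exception types, so API failures would bypass this handler. A sketch of the same guard using the SDK's base exception class, offered as a suggested variant rather than the committed code:

from openai import OpenAIError

try:
    completion = client.chat.completions.create(
        model="meta/llama-3.1-405b-instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.03,
        top_p=0.7,
        max_tokens=4096,
        stream=True
    )
except OpenAIError as e:
    # OpenAIError is the base class for APIError, APIConnectionError, RateLimitError, etc.
    print(f"API request failed: {e}")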
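Models also frequently wrap JSON output in markdown code fences, which would make the json.loads call above fall through to the raw-text fallback. A small hedged helper that strips a fence before parsing; the ```json ... ``` fence format assumed here is a common LLM habit, not behavior verified against this model:

import json
import re

def parse_model_json(text):
    # Strip a ```json ... ``` fence if the model added one (an assumed, common pattern)
    match = re.search(r"```(?:json)?\s*(.*?)```", text, re.DOTALL)
    if match:
        text = match.group(1)
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        return None  # Caller can fall back to the raw text, as the commit does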
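The diff is truncated at the export_test_cases signature, so the committed body is not visible here. For orientation only, a hypothetical CSV export built on the csv and io modules the file already imports; nothing below is taken from the actual implementation:

import csv
import io

def export_test_cases_sketch(test_cases, format='csv'):
    # Hypothetical: derive the header from the union of keys across all test cases
    fieldnames = sorted({key for case in test_cases for key in case})
    output = io.StringIO()
    writer = csv.DictWriter(output, fieldnames=fieldnames, restval="")
    writer.writeheader()
    writer.writerows(test_cases)
    return output.getvalue()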
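Putting it together, a minimal driver showing how generate_testcases would be invoked; the sample user story is invented for illustration:

if __name__ == "__main__":
    # Hypothetical user story, for illustration only
    story = "As an ODAC Portal user, I want to reset my password via an emailed link."
    for case in generate_testcases(story):
        print(case)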