|
import html
import io
import json
import os

import pandas as pd
import requests
from bs4 import BeautifulSoup
from openai import OpenAI
|
|
|
|
|
# NVIDIA-hosted OpenAI-compatible endpoint used by generate_testcases().
# SECURITY: the API key used to be hard-coded here. Prefer the NVIDIA_API_KEY
# environment variable; the original literal is kept only as a fallback for
# backward compatibility and should be rotated and removed.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get(
        "NVIDIA_API_KEY",
        "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",
    ),
)
|
|
|
def clean_test_case_output(text):
    """
    Cleans the output to handle HTML characters and unwanted tags.
    """
    # First decode HTML entities (&amp; -> &), then strip any markup,
    # joining the remaining text fragments with newlines.
    unescaped = html.unescape(text)
    parsed = BeautifulSoup(unescaped, 'html.parser')
    return parsed.get_text(separator="\n").strip()
|
|
|
def generate_testcases(user_story):
    """
    Generates advanced QA test cases based on a provided user story by interacting
    with Nvidia's llama model API. The prompt is refined for clarity,
    and the output is processed for better quality.

    :param user_story: A string representing the user story for which to generate test cases.
    :return: A list of test cases in the form of dictionaries. On API failure an
             empty list is returned; if the model output is not JSON, the cleaned
             raw text is wrapped in a single-element list.
    """

    few_shot_examples = """
    "if its not a DropBury or ODAC Portal User Story, then we perform testing in Tech360 iOS App"
    "Generate as many as testcases possible minimum 6 ,maximum it can be anything"
    "Understand the story thoroughly"
    "If it's a DropBury or ODAC Portal User Story, then we perform testing in ODAC Portal"
    """

    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"

    try:
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.03,
            top_p=0.7,
            max_tokens=4096,
            stream=True
        )

        # Accumulate streamed deltas in a list and join once — avoids the
        # quadratic cost of repeated string concatenation.
        parts = []
        for chunk in completion:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                parts.append(delta)
        test_cases_text = "".join(parts)

        if not test_cases_text.strip():
            return [{"test_case": "No test cases generated or output was empty."}]

        test_cases_text = clean_test_case_output(test_cases_text)

        # The model may or may not emit valid JSON; fall back to raw text.
        try:
            test_cases = json.loads(test_cases_text)
        except json.JSONDecodeError:
            return [{"test_case": test_cases_text}]

        if isinstance(test_cases, list):
            return test_cases
        return [{"test_case": test_cases_text}]

    # BUG FIX: the OpenAI SDK raises openai.APIError / APIConnectionError,
    # never requests.exceptions.RequestException — the old handler was dead
    # code and every API failure propagated uncaught. Only the OpenAI class
    # is imported here, so catch Exception at this boundary and report it.
    except Exception as e:
        print(f"API request failed: {str(e)}")
        return []
|
|
|
def export_test_cases(test_cases):
    """
    Exports the test cases to an Excel file with specific columns:
    - Test Case
    - Preconditions
    - Steps
    - Expected Result

    :param test_cases: A list of test case dictionaries (each carrying a
        'test_case' key) or raw text entries.
    :return: Bytes of the Excel (.xlsx) file, or the string
        "No test cases to export." when the input is empty (kept for
        backward compatibility with existing callers).
    """
    if not test_cases:
        return "No test cases to export."

    formatted_test_cases = [_format_test_case(case) for case in test_cases]

    df = pd.DataFrame(formatted_test_cases)

    # Write the workbook into an in-memory buffer and hand back raw bytes.
    output = io.BytesIO()
    df.to_excel(output, index=False, engine='openpyxl')
    output.seek(0)
    return output.getvalue()


def _format_test_case(case):
    """Split one raw test-case entry into the four export columns."""
    # ROBUSTNESS FIX: the docstring allows raw-text entries, but the original
    # called case.get() unconditionally and raised AttributeError on strings.
    if isinstance(case, dict):
        content = case.get('test_case', '')
    else:
        content = str(case)

    test_case = ""
    preconditions = ""
    steps = ""
    expected_result = ""

    # Section headers are matched by substring; the colon is stripped when
    # present. The first non-header line becomes the test-case title.
    for line in content.split('\n'):
        if "Preconditions" in line:
            preconditions = line.replace("Preconditions:", "").strip()
        elif "Steps" in line:
            steps = line.replace("Steps:", "").strip()
        elif "Expected Result" in line:
            expected_result = line.replace("Expected Result:", "").strip()
        elif not test_case:
            test_case = line.strip()

    return {
        'Test Case': test_case,
        'Preconditions': preconditions,
        'Steps': steps,
        'Expected Result': expected_result,
    }
|
|