import json
import io
import os
import html  # For escaping HTML characters
from bs4 import BeautifulSoup
import pandas as pd  # pandas for Excel export
from openai import OpenAI, OpenAIError

# Initialize the OpenAI client against Nvidia's API endpoint (Llama model)
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")  # Read the key from the environment instead of hard-coding it
)

def clean_test_case_output(text):
    """
    Cleans the output to handle HTML characters and unwanted tags.
    """
    text = html.unescape(text)  # Unescape HTML entities
    soup = BeautifulSoup(text, 'html.parser')  # Use BeautifulSoup to handle HTML tags
    cleaned_text = soup.get_text(separator="\n").strip()  # Remove tags and handle newlines
    return cleaned_text

def generate_testcases(user_story):
    """
    Generates advanced QA test cases for a given user story by calling
    Nvidia's Llama model API. The prompt is refined for clarity,
    and the output is post-processed for better quality.

    :param user_story: A string representing the user story for which to generate test cases.
    :return: A list of test cases in the form of dictionaries.
    """
    # Guiding instructions (few-shot style) to steer the model
    few_shot_examples = """
    "If it's not a DropBury or ODAC Portal user story, then testing is performed in the Tech360 iOS app"
    "Generate as many test cases as possible: a minimum of 6, with no upper limit"
    "Understand the story thoroughly"
    "If it's a DropBury or ODAC Portal user story, then testing is performed in the ODAC Portal"
    """

    # Combine the guiding instructions with the user story for the model to process
    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"
    try:
        # Call the Nvidia Llama API with the refined prompt
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.03,
            top_p=0.7,
            max_tokens=4096,
            stream=True
        )

        # Accumulate the response from the streaming chunks
        test_cases_text = ""
        for chunk in completion:
            if chunk.choices[0].delta.content is not None:
                test_cases_text += chunk.choices[0].delta.content

        # Ensure the response actually contains something before cleaning
        if test_cases_text.strip() == "":
            return [{"test_case": "No test cases generated or output was empty."}]

        # Clean the output by unescaping HTML entities and stripping tags
        test_cases_text = clean_test_case_output(test_cases_text)

        try:
            # Try to parse the output as JSON, in case the model returned structured test cases
            test_cases = json.loads(test_cases_text)
            if isinstance(test_cases, list):
                return test_cases  # Return structured test cases
            else:
                return [{"test_case": test_cases_text}]  # Wrap the text in a single-item list
        except json.JSONDecodeError:
            # Fallback: return the raw text if JSON parsing fails
            return [{"test_case": test_cases_text}]

    except OpenAIError as e:
        # The OpenAI client raises OpenAIError subclasses (not requests exceptions)
        print(f"API request failed: {str(e)}")
        return []

def export_test_cases(test_cases):
    """
    Exports the test cases to an Excel file with specific columns:
    - Test Case
    - Preconditions
    - Steps
    - Expected Result

    :param test_cases: A list of test case dictionaries or raw text.
    :return: Bytes of the Excel file.
    """
    if not test_cases:
        return "No test cases to export."

    # Define the structure of the Excel file
    formatted_test_cases = []
    for case in test_cases:
        # Assuming each test case is a dictionary with 'test_case' content or similar
        test_case_content = case.get('test_case', '')

        # Split the content into separate sections (you might need to adjust based on actual output structure)
        lines = test_case_content.split('\n')
        test_case = ""
        preconditions = ""
        steps = ""
        expected_result = ""

        for line in lines:
            if "Preconditions" in line:
                preconditions = line.replace("Preconditions:", "").strip()
            elif "Steps" in line:
                steps = line.replace("Steps:", "").strip()
            elif "Expected Result" in line:
                expected_result = line.replace("Expected Result:", "").strip()
            else:
                # Default to putting the first part as the "Test Case"
                if not test_case:
                    test_case = line.strip()

        # Append to formatted test cases list
        formatted_test_cases.append({
            'Test Case': test_case,
            'Preconditions': preconditions,
            'Steps': steps,
            'Expected Result': expected_result
        })

    # Convert the list of dictionaries into a DataFrame
    df = pd.DataFrame(formatted_test_cases)

    # Create an in-memory Excel file using pandas
    output = io.BytesIO()
    df.to_excel(output, index=False, engine='openpyxl')  # Export to Excel without index
    output.seek(0)  # Rewind the buffer to the beginning
    return output.getvalue()
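
# Example usage: a minimal sketch with a hypothetical user story. It assumes the
# NVIDIA_API_KEY environment variable is set; the output file name "test_cases.xlsx"
# is arbitrary and not part of the functions above.
if __name__ == "__main__":
    sample_story = (
        "As a Tech360 iOS app user, I want to reset my password "
        "so that I can regain access to my account."
    )
    cases = generate_testcases(sample_story)
    excel_bytes = export_test_cases(cases)
    if isinstance(excel_bytes, bytes):
        with open("test_cases.xlsx", "wb") as f:
            f.write(excel_bytes)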