import json
import io
import os
import requests
import html # For escaping HTML characters
from bs4 import BeautifulSoup
import pandas as pd
from openpyxl import Workbook
from openpyxl.styles import Alignment, Font
from openai import OpenAI
# Initialize the OpenAI client against Nvidia's hosted Llama endpoint.
# The API key is read from an environment variable (assumed name: NVIDIA_API_KEY)
# rather than hard-coded, so the secret does not live in the source file.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")
)
def clean_test_case_output(text):
    """
    Cleans the model output by unescaping HTML entities and stripping HTML tags.
    """
    text = html.unescape(text)  # Unescape HTML entities such as &amp; and &lt;
    soup = BeautifulSoup(text, 'html.parser')  # Parse any remaining HTML tags
    cleaned_text = soup.get_text(separator="\n").strip()  # Drop tags, keep line breaks
    return cleaned_text
def generate_testcases(user_story):
    """
    Generates advanced QA test cases for a given user story by calling
    Nvidia's hosted Llama model API.

    :param user_story: A string representing the user story for which to generate test cases.
    :return: A list of dictionaries with test case information.
    """
    few_shot_examples = """
    "If it's not a DropBury or ODAC Portal User Story, then we perform testing in the Tech360 iOS App"
    "Generate as many test cases as possible, a minimum of 6 with no upper limit"
    "Understand the story thoroughly"
    "If it's a DropBury or ODAC Portal User Story, then we perform testing in the ODAC Portal"

    Please generate test cases in the following format:

    Test Case 1:
    Preconditions: [Describe any preconditions here]
    Steps: [List the steps required to perform the test]
    Expected Result: [Describe the expected result of the test]

    Test Case 2:
    Preconditions: [Describe any preconditions here]
    Steps: [List the steps required to perform the test]
    Expected Result: [Describe the expected result of the test]
    """

    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"

    try:
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.2,
            top_p=0.5,
            max_tokens=4096,
            stream=True
        )

        # Accumulate the streamed response chunks into a single string.
        test_cases_text = ""
        for chunk in completion:
            if chunk.choices[0].delta.content is not None:
                test_cases_text += chunk.choices[0].delta.content

        # Print the raw response for debugging.
        print("Raw response from model:", test_cases_text)

        # Make sure the response is not empty before cleaning it.
        if test_cases_text.strip() == "":
            return [{"Test Case": "No test cases generated or output was empty."}]

        # Clean the output by unescaping HTML entities and stripping tags.
        test_cases_text = clean_test_case_output(test_cases_text)

        # Print the cleaned response for debugging.
        print("Cleaned response:", test_cases_text)

        # Split the output into individual test case blocks on blank lines.
        test_case_blocks = test_cases_text.split('\n\n')
        test_cases = []
        for block in test_case_blocks:
            lines = block.split('\n')
            # A well-formed block has a title line followed by
            # Preconditions, Steps and Expected Result lines.
            if len(lines) >= 4:
                test_case = {
                    'Test Case': lines[0].replace('Test Case ', '').strip(),
                    'Preconditions': lines[1].replace('Preconditions: ', '').strip(),
                    'Steps': lines[2].replace('Steps: ', '').strip(),
                    'Expected Result': lines[3].replace('Expected Result: ', '').strip(),
                }
                test_cases.append(test_case)

        if not test_cases:
            return [{"Test Case": "No test cases generated or output was empty."}]
        return test_cases

    except Exception as e:
        # The OpenAI client raises its own exception types rather than
        # requests exceptions, so catch broadly and report the failure.
        print(f"API request failed: {str(e)}")
        return [{"Test Case": "API request failed."}]
def export_test_cases(test_cases):
    """
    Exports the test cases to an Excel file with specific columns:
    - Test Case
    - Preconditions
    - Steps
    - Expected Result

    :param test_cases: A list of test case dictionaries.
    :return: Bytes of the Excel file, or an error message if there is nothing to export.
    """
    if not test_cases:
        return "No test cases to export."

    # Ensure each field has a default value if it is missing.
    formatted_test_cases = []
    for case in test_cases:
        formatted_test_cases.append({
            'Test Case': case.get('Test Case', 'N/A'),
            'Preconditions': case.get('Preconditions', 'N/A'),
            'Steps': case.get('Steps', 'N/A'),
            'Expected Result': case.get('Expected Result', 'N/A')
        })

    wb = Workbook()
    ws = wb.active
    ws.title = "Test Cases"

    # Add headers with bold, centred formatting.
    headers = ["Test Case", "Preconditions", "Steps", "Expected Result"]
    ws.append(headers)
    for cell in ws[1]:
        cell.font = Font(bold=True)
        cell.alignment = Alignment(horizontal="center", vertical="center")

    # Add the test case data.
    for case in formatted_test_cases:
        ws.append([case["Test Case"], case["Preconditions"], case["Steps"], case["Expected Result"]])

    # Adjust column widths for readability.
    ws.column_dimensions['A'].width = 50  # Test Case
    ws.column_dimensions['B'].width = 30  # Preconditions
    ws.column_dimensions['C'].width = 50  # Steps
    ws.column_dimensions['D'].width = 50  # Expected Result

    # Save the workbook to an in-memory buffer and return its bytes.
    output = io.BytesIO()
    wb.save(output)
    output.seek(0)
    return output.getvalue()
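
# A minimal usage sketch (not part of the original interface): wire the two
# functions together, generate test cases for a hypothetical user story and
# write the workbook to disk. The story text and output filename below are
# placeholders chosen for illustration.
if __name__ == "__main__":
    sample_story = (
        "As a Tech360 iOS App user, I want to reset my password "
        "so that I can regain access to my account."
    )
    cases = generate_testcases(sample_story)
    excel_bytes = export_test_cases(cases)
    if isinstance(excel_bytes, bytes):
        with open("test_cases.xlsx", "wb") as f:
            f.write(excel_bytes)
        print(f"Exported {len(cases)} test case(s) to test_cases.xlsx")
    else:
        # export_test_cases returns a message string when there is nothing to export.
        print(excel_bytes)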