import json
import csv
import io
import os
import html  # For unescaping HTML entities
from bs4 import BeautifulSoup
from openai import OpenAI, OpenAIError

# Initialize the OpenAI client against NVIDIA's API endpoint.
# The API key is read from an environment variable (NVIDIA_API_KEY is a suggested
# name) rather than being hardcoded in the source.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")
)

def clean_test_case_output(text):
    """
    Cleans the output to handle HTML characters and unwanted tags.
    """
    text = html.unescape(text)  # Unescape HTML entities
    soup = BeautifulSoup(text, 'html.parser')  # Use BeautifulSoup to handle HTML tags
    cleaned_text = soup.get_text(separator="\n").strip()  # Remove tags and handle newlines
    return cleaned_text
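
# Example of the cleaning step (illustrative, not from the original file):
#   clean_test_case_output("Step 1<br>Launch the app &amp; log in")
#   returns "Step 1\nLaunch the app & log in"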

def generate_testcases(user_story):
    """
    Generates QA test cases for a provided user story by calling the
    Llama 3.1 405B Instruct model through NVIDIA's API. The prompt is built from
    few-shot examples for guidance, and the output is cleaned before being returned.

    :param user_story: A string representing the user story for which to generate test cases.
    :return: A list of test cases in the form of dictionaries.
    """

    # Few-shot learning examples to guide the model
    few_shot_examples = """
    Example 1:
    User Story:
    Ensure the "Key Details" feature is enabled in the backend.
    1. Open the Tech 360 app.
    2. Click on the Search tab.
    3. Enter the account number and click on Enter.
    4. Choose the account from the Search result.
    5. Verify that the "Key Details" button is displayed under the recommendation section.
    6. Clicking on "Key Details" should navigate to the key details screen.
    
    Test Case:
    Test Case: Verify "Key Details" Feature in Tech 360 App
    Steps:
    1. Launch the Tech 360 app.
    2. Go to the Search tab.
    3. Enter a valid account number and click Enter.
    4. Select an account from the search results.
    5. Ensure the "Key Details" button is displayed under the recommendation section.
    6. Click on "Key Details" and verify that it navigates to the Key Details screen.
    Expected Result: User is navigated to the Key Details screen successfully.
    
    Example 2:
    User Story:
    Open the Tech 360 app. Go On-Job on a Wi-Fi Ready Preinstall Job. Wait for PHT to finish. Go to the Device Details Page for XB7 and ONU.
    
    Test Case:
    Test Case: Verify Device Details Page for XB7 and ONU in Wi-Fi Ready Preinstall Job
    Steps:
    1. Launch the Tech 360 app.
    2. Go On-Job on a Wi-Fi Ready Preinstall Job.
    3. Wait for PHT to finish.
    4. Navigate to the Device Details Page for XB7 and ONU.
    Expected Result: Device details for XB7 and ONU are displayed correctly.
    
    Example 3:
    User Story:
    Open the Tech 360 app. Go On-Job on a Wi-Fi Ready Preinstall Job. Initiate Add/Remove/Swap Flows for ONU and XB7.
    
    Test Case:
    Test Case: Verify Add/Remove/Swap Flows for ONU and XB7 in Wi-Fi Ready Preinstall Job
    Steps:
    1. Launch the Tech 360 app.
    2. Go On-Job on a Wi-Fi Ready Preinstall Job.
    3. Initiate Add/Remove/Swap flows for ONU and XB7.
    Expected Result: Add/Remove/Swap flows for ONU and XB7 are initiated and processed successfully.
    """

    # Combine the few-shot examples with the user story for the model to process
    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"

    try:
        # Call NVIDIA's API with the assembled prompt
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",  # Llama 3.1 405B Instruct served through NVIDIA's API
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.06,  # Low temperature for precise, deterministic output
            top_p=0.5,  # Restrict sampling to high-probability tokens
            max_tokens=4096,  # Allow long responses
            stream=True  # Stream the response for faster retrieval
        )

        # Initialize an empty string to accumulate the response
        test_cases_text = ""
        
        # Accumulate the response from the streaming chunks
        for chunk in completion:
            if chunk.choices[0].delta.content is not None:
                test_cases_text += chunk.choices[0].delta.content

        # If the response is empty, return a placeholder instead of cleaning it
        if test_cases_text.strip() == "":
            return [{"test_case": "No test cases generated or output was empty."}]
        
        # Clean the output by unescaping HTML entities and stripping any HTML tags
        test_cases_text = clean_test_case_output(test_cases_text)

        try:
            # Try to parse the output as JSON, assuming the model returns structured test cases
            test_cases = json.loads(test_cases_text)
            if isinstance(test_cases, list):
                return test_cases  # Return structured test cases
            else:
                return [{"test_case": test_cases_text}]  # Return as a list with the text wrapped in a dict

        except json.JSONDecodeError:
            # Fallback: return the raw text if JSON parsing fails
            return [{"test_case": test_cases_text}]
    
    except OpenAIError as e:
        print(f"API request failed: {str(e)}")
        return []

# Export test cases in multiple formats (JSON or CSV)
def export_test_cases(test_cases, format='json'):
    if not test_cases:
        return "No test cases to export."

    if format == 'json':
        # Pretty-print the JSON so each field sits on its own line
        return json.dumps(test_cases, indent=4, separators=(',', ': '))
    elif format == 'csv':
        # CSV rows must be dictionaries; wrap bare strings in a structured form first
        if not isinstance(test_cases[0], dict):
            test_cases = [{'Test Case': case} for case in test_cases]
        output = io.StringIO()
        csv_writer = csv.DictWriter(output, fieldnames=test_cases[0].keys(), quoting=csv.QUOTE_ALL)
        csv_writer.writeheader()
        csv_writer.writerows(test_cases)
        return output.getvalue()
    else:
        return f"Unsupported format: {format}"

# Save test cases as a downloadable file
def save_test_cases_as_file(test_cases, format='json'):
    if not test_cases:
        return "No test cases to save."

    if format == 'json':
        with open('test_cases.json', 'w') as f:
            json.dump(test_cases, f, indent=4)
    elif format == 'csv':
        # CSV rows must be dictionaries; wrap bare strings if needed
        if not isinstance(test_cases[0], dict):
            test_cases = [{'Test Case': case} for case in test_cases]
        with open('test_cases.csv', 'w', newline='') as file:
            dict_writer = csv.DictWriter(file, fieldnames=test_cases[0].keys())
            dict_writer.writeheader()
            dict_writer.writerows(test_cases)
    else:
        return f"Unsupported format: {format}"
    return f'{format} file saved'
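
# Minimal usage sketch (illustrative; the sample story below is taken from the
# few-shot examples above, and running it assumes the API key environment variable
# is set). It generates test cases for one user story and exports them in both
# supported formats.
if __name__ == "__main__":
    sample_story = (
        "Open the Tech 360 app. Go On-Job on a Wi-Fi Ready Preinstall Job. "
        "Wait for PHT to finish. Go to the Device Details Page for XB7 and ONU."
    )
    cases = generate_testcases(sample_story)
    print(export_test_cases(cases, format='json'))
    print(save_test_cases_as_file(cases, format='csv'))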