ArvindSelvaraj committed
Commit 0df70ac
1 Parent(s): 6035560

Update backend.py

Files changed (1)
  1. backend.py +65 -104
backend.py CHANGED
@@ -1,12 +1,10 @@
 import json
+import csv
 import io
 import requests
 import html # For escaping HTML characters
 from bs4 import BeautifulSoup
-import pandas as pd
-from openpyxl import Workbook
-from openpyxl.styles import Alignment, Font
-from openai import OpenAI
+from openai import OpenAI
 
 # Initialize OpenAI API with Nvidia's Llama model
 client = OpenAI(
@@ -26,142 +24,105 @@ def clean_test_case_output(text):
 def generate_testcases(user_story):
     """
     Generates advanced QA test cases based on a provided user story by interacting
-    with Nvidia's llama model API.
+    with Nvidia's llama model API. The prompt is refined for clarity,
+    and the output is processed for better quality.
 
     :param user_story: A string representing the user story for which to generate test cases.
-    :return: A list of dictionaries with test case information.
+    :return: A list of test cases in the form of dictionaries.
     """
+
+    # Few-shot learning examples to guide the model
     few_shot_examples = """
+
     "if its not a DropBury or ODAC Portal User Story, then we perform testing in Tech360 iOS App"
     "Generate as many as testcases possible minimum 6 ,maximum it can be anything"
     "Understand the story thoroughly"
     "If it's a DropBury or ODAC Portal User Story, then we perform testing in ODAC Portal"
-
-    Please generate test cases in the following format:
-
-    Test Case 1:
-    Preconditions: [Describe any preconditions here]
-    Steps: [List the steps required to perform the test]
-    Expected Result: [Describe the expected result of the test]
-
-    Test Case 2:
-    Preconditions: [Describe any preconditions here]
-    Steps: [List the steps required to perform the test]
-    Expected Result: [Describe the expected result of the test]
     """
 
-    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"
+    # Combine the few-shot examples with the user story for the model to process
+    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"
 
     try:
+        # Call the Nvidia llama API with the refined prompt
         completion = client.chat.completions.create(
-            model="meta/llama-3.1-405b-instruct",
+            model="meta/llama-3.1-405b-instruct", # Using llama3.1 405b model
             messages=[
                 {"role": "user", "content": prompt}
             ],
-            temperature=0.2,
-            top_p=0.5,
-            max_tokens=4096,
-            stream=True
+            temperature=0.03, # Further lowering temperature for precise and deterministic output
+            top_p=0.7, # Prioritize high-probability tokens even more
+            max_tokens=4096, # Increase max tokens to allow longer content
+            stream=True # Streaming the response for faster retrieval
         )
 
         # Initialize an empty string to accumulate the response
         test_cases_text = ""
-
+
         # Accumulate the response from the streaming chunks
         for chunk in completion:
             if chunk.choices[0].delta.content is not None:
                 test_cases_text += chunk.choices[0].delta.content
 
-        # Print raw response for debugging
-        print("Raw response from model:", test_cases_text)
 
         # Ensure the entire response is captured before cleaning
         if test_cases_text.strip() == "":
-            return [{"Test Case": "No test cases generated or output was empty."}]
-
+            return [{"test_case": "No test cases generated or output was empty."}]
+
         # Clean the output by unescaping HTML entities and replacing <br> tags
        test_cases_text = clean_test_case_output(test_cases_text)
 
-        # Print cleaned response for debugging
-        print("Cleaned response:", test_cases_text)
-
-        # Split the output into individual test cases by detecting patterns
-        test_case_blocks = test_cases_text.split('\n\n')
-        test_cases = []
-        for block in test_case_blocks:
-            lines = block.split('\n')
-            if len(lines) >= 4:
-                test_case = {
-                    'Test Case': lines[0].replace('Test Case ', '').strip(),
-                    'Preconditions': lines[1].replace('Preconditions: ', '').strip(),
-                    'Steps': lines[2].replace('Steps: ', '').strip(),
-                    'Expected Result': lines[3].replace('Expected Result: ', '').strip(),
-                }
-                test_cases.append(test_case)
-
-        if not test_cases:
-            return [{"Test Case": "No test cases generated or output was empty."}]
+        try:
+            # Try to parse the output as JSON, assuming the model returns structured test cases
+            test_cases = json.loads(test_cases_text)
+            if isinstance(test_cases, list):
+                return test_cases # Return structured test cases
 
-        return test_cases
+            else:
+                return [{"test_case": test_cases_text}] # Return as a list with the text wrapped in a dict
 
+        except json.JSONDecodeError:
+            # Fallback: return the raw text if JSON parsing fails
+            return [{"test_case": test_cases_text}]
+
     except requests.exceptions.RequestException as e:
         print(f"API request failed: {str(e)}")
-        return [{"Test Case": "API request failed."}]
+        return []
 
-def export_test_cases(test_cases):
-    """
-    Exports the test cases to an Excel file with specific columns:
-    - Test Case
-    - Preconditions
-    - Steps
-    - Expected Result
-
-    :param test_cases: A list of test case dictionaries.
-    :return: Bytes of the Excel file.
-    """
+# Add options for multiple test case formats
+def export_test_cases(test_cases, format='json'):
     if not test_cases:
         return "No test cases to export."
 
-    formatted_test_cases = []
-
-    for case in test_cases:
-        # Ensure each field has a default value if missing
-        test_case = case.get('Test Case', 'N/A')
-        preconditions = case.get('Preconditions', 'N/A')
-        steps = case.get('Steps', 'N/A')
-        expected_result = case.get('Expected Result', 'N/A')
-
-        formatted_test_cases.append({
-            'Test Case': test_case,
-            'Preconditions': preconditions,
-            'Steps': steps,
-            'Expected Result': expected_result
-        })
-
-    wb = Workbook()
-    ws = wb.active
-    ws.title = "Test Cases"
-
-    # Add headers with formatting
-    headers = ["Test Case", "Preconditions", "Steps", "Expected Result"]
-    ws.append(headers)
-
-    for cell in ws[1]:
-        cell.font = Font(bold=True)
-        cell.alignment = Alignment(horizontal="center", vertical="center")
-
-    # Add the test case data
-    for case in formatted_test_cases:
-        ws.append([case["Test Case"], case["Preconditions"], case["Steps"], case["Expected Result"]])
-
-    # Adjust column widths for neatness
-    ws.column_dimensions['A'].width = 50 # Test Case
-    ws.column_dimensions['B'].width = 30 # Preconditions
-    ws.column_dimensions['C'].width = 50 # Steps
-    ws.column_dimensions['D'].width = 50 # Expected Result
-
-    output = io.BytesIO()
-    wb.save(output)
-    output.seek(0)
-
-    return output.getvalue()
+    # Convert test cases (which are currently strings) into a structured format for CSV
+    structured_test_cases = [{'Test Case': case} for case in test_cases]
+
+    if format == 'json':
+        # Improve JSON export to be line-by-line formatted
+        return json.dumps(test_cases, indent=4, separators=(',', ': ')) # More readable format
+    elif format == 'csv':
+        if isinstance(test_cases, list) and isinstance(test_cases[0], dict):
+            output = io.StringIO()
+            csv_writer = csv.DictWriter(output, fieldnames=test_cases[0].keys(), quoting=csv.QUOTE_ALL)
+            csv_writer.writeheader()
+            csv_writer.writerows(test_cases)
+            return output.getvalue()
+        else:
+            raise ValueError("Test cases must be a list of dictionaries for CSV export.")
+
+    # 2. Save test cases as a downloadable file
+    def save_test_cases_as_file(test_cases, format='json'):
+        if not test_cases:
+            return "No test cases to save."
+
+        if format == 'json':
+            with open('test_cases.json', 'w') as f:
+                json.dump(test_cases, f)
+        elif format == 'csv':
+            with open('test_cases.csv', 'w', newline='') as file:
+                dict_writer = csv.DictWriter(file, fieldnames=test_cases[0].keys())
+                dict_writer.writeheader()
+                dict_writer.writerows(test_cases)
+        else:
+            return f"Unsupported format: {format}"
+        return f'{format} file saved'
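
For context, here is a minimal usage sketch of the API surface after this commit. It assumes backend.py is importable and that the OpenAI client is configured with valid Nvidia credentials; the driver script and the sample story are illustrative, not part of the commit:

# Hypothetical driver script (not part of the commit); exercises the
# functions this commit introduces or changes.
from backend import generate_testcases, export_test_cases, save_test_cases_as_file

if __name__ == "__main__":
    # Illustrative story; per the few-shot rules, a non-DropBury/ODAC story
    # is tested in the Tech360 iOS App.
    story = "As a Tech360 iOS user, I want to reset my password so that I can regain access."

    # Returns a list of dicts: structured test cases if the model emitted JSON,
    # a single {"test_case": <raw text>} fallback otherwise, or [] on API failure.
    test_cases = generate_testcases(story)

    # JSON export returns a pretty-printed string; the CSV branch requires a
    # list of dicts and raises ValueError otherwise.
    print(export_test_cases(test_cases, format="json"))

    # Writes test_cases.json (or test_cases.csv) to the working directory.
    print(save_test_cases_as_file(test_cases, format="json"))

Note that both CSV paths derive their column headers from test_cases[0].keys(), so the export assumes every test-case dictionary shares the same keys.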