import json
import io
import os
import requests
import html  # For unescaping HTML entities in model output
from bs4 import BeautifulSoup  # Strips residual HTML tags from model output
import pandas as pd  # Excel export of generated test cases
from openai import OpenAI

# Client for Nvidia's OpenAI-compatible Llama inference endpoint.
# SECURITY: an API key was previously hard-coded here and committed to source
# control — it must be considered compromised and rotated. Prefer supplying
# the key via the NVIDIA_API_KEY environment variable; the literal fallback
# only keeps existing deployments working until rotation lands.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get(
        "NVIDIA_API_KEY",
        "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",
    ),
)


def clean_test_case_output(text):
    """
    Clean model output by unescaping HTML entities and stripping HTML tags.

    :param text: Raw text returned by the model, possibly containing HTML
                 entities (e.g. ``&amp;``) and markup tags.
    :return: Plain text with tags removed; tag boundaries become newlines.
    """
    text = html.unescape(text)  # &amp; -> &, &lt; -> <, etc.
    soup = BeautifulSoup(text, 'html.parser')
    cleaned_text = soup.get_text(separator="\n").strip()
    return cleaned_text


def generate_testcases(user_story):
    """
    Generate advanced QA test cases for a user story via Nvidia's Llama model.

    :param user_story: A string representing the user story for which to
                       generate test cases.
    :return: A list of test cases. If the model returns valid JSON, the
             parsed list is returned as-is; otherwise the cleaned raw text
             is wrapped as ``[{"test_case": <text>}]``. Returns ``[]`` when
             the API call fails.
    """
    # Few-shot guidance prepended to the user story to steer the model.
    few_shot_examples = """
    "if its not a DropBury or ODAC Portal User Story, then we perform testing in Tech360 iOS App"
    "Generate as many as testcases possible minimum 6 ,maximum it can be anything"
    "Understand the story thoroughly"
    "If it's a DropBury or ODAC Portal User Story, then we perform testing in ODAC Portal"
    """
    prompt = few_shot_examples + f"\nUser Story: {user_story}\n"

    try:
        # Stream the completion so long outputs arrive incrementally.
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.03,
            top_p=0.7,
            max_tokens=4096,
            stream=True,
        )

        # Accumulate the streamed chunks into a single response string.
        test_cases_text = ""
        for chunk in completion:
            if chunk.choices[0].delta.content is not None:
                test_cases_text += chunk.choices[0].delta.content

        # Guard: the stream may legitimately produce nothing.
        if not test_cases_text.strip():
            return [{"test_case": "No test cases generated or output was empty."}]

        # Unescape HTML entities and strip any tags the model emitted.
        test_cases_text = clean_test_case_output(test_cases_text)

        try:
            # Prefer structured output: the model may return a JSON list.
            test_cases = json.loads(test_cases_text)
            if isinstance(test_cases, list):
                return test_cases
            # Parsed, but not a list — wrap the cleaned text instead.
            return [{"test_case": test_cases_text}]
        except json.JSONDecodeError:
            # Fallback: return the raw cleaned text if JSON parsing fails.
            return [{"test_case": test_cases_text}]
    except Exception as e:
        # BUG FIX: the original caught requests.exceptions.RequestException,
        # but the OpenAI client does not raise requests exceptions (it is
        # httpx-based), so every API failure propagated uncaught. Catch at
        # this boundary, report, and return the documented empty result.
        print(f"API request failed: {str(e)}")
        return []


def export_test_cases(test_cases):
    """
    Export test cases to an in-memory Excel (.xlsx) workbook.

    :param test_cases: List of test-case dicts, as returned by
                       ``generate_testcases``.
    :return: The workbook contents as ``bytes``, or the string
             ``"No test cases to export."`` when the list is empty
             (string return kept for backward compatibility with callers).
    """
    if not test_cases:
        return "No test cases to export."

    # Build the sheet in memory; no temp file needed.
    output = io.BytesIO()
    df = pd.DataFrame(test_cases)
    df.to_excel(output, index=False, engine='openpyxl')  # requires openpyxl
    output.seek(0)  # Rewind the buffer before reading it back
    return output.getvalue()