ArvindSelvaraj
committed on
Commit
•
5e25e4d
1
Parent(s):
cb27580
Update backend.py
Browse files- backend.py +3 -2
backend.py
CHANGED
@@ -36,6 +36,7 @@ def generate_testcases(user_story):
|
|
36 |
|
37 |
"App we perform testing is Tech360 iOS App"
|
38 |
"Generate as many as testcases possible minimum 10 ,maximum it can be anything"
|
|
|
39 |
"""
|
40 |
|
41 |
# Combine the few-shot examples with the user story for the model to process
|
@@ -48,8 +49,8 @@ def generate_testcases(user_story):
|
|
48 |
messages=[
|
49 |
{"role": "user", "content": prompt}
|
50 |
],
|
51 |
-
temperature=
|
52 |
-
top_p=0.
|
53 |
max_tokens=4096, # Increase max tokens to allow longer content
|
54 |
stream=True # Streaming the response for faster retrieval
|
55 |
)
|
|
|
36 |
|
37 |
"App we perform testing is Tech360 iOS App"
|
38 |
"Generate as many as testcases possible minimum 10 ,maximum it can be anything"
|
39 |
+
"Understand the story thoroughly"
|
40 |
"""
|
41 |
|
42 |
# Combine the few-shot examples with the user story for the model to process
|
|
|
49 |
messages=[
|
50 |
{"role": "user", "content": prompt}
|
51 |
],
|
52 |
+
temperature=0.07, # Further lowering temperature for precise and deterministic output
|
53 |
+
top_p=0.5, # Prioritize high-probability tokens even more
|
54 |
max_tokens=4096, # Increase max tokens to allow longer content
|
55 |
stream=True # Streaming the response for faster retrieval
|
56 |
)
|