tamas.kiss committed
Commit d580c4b · 1 Parent(s): 67fec6e
Update output with project name
app.py
CHANGED
@@ -322,9 +322,15 @@ def text_to_text_generation(verbose, prompt):
     original, new = generate_batch(prompt)[0]
     prompt, response = cleanup(original, new)
     if verbose:
-        return f"{modes}\n\n" f"# Prompt given to the model:\n" f"{str_to_md(prompt)}\n" f"# Model's answer:\n" f"{str_to_md(response)}\n"
+        return (
+            f"# 📚KubeWizard📚\n"
+            f"{modes}\n\n" f"# Prompt given to the model:\n" f"{str_to_md(prompt)}\n" f"# Model's answer:\n" f"{str_to_md(response)}\n"
+        )
     else:
-        return f"{modes}\n\n" f"# Answer:\n" f"{str_to_md(response)}"
+        return (
+            f"# 📚KubeWizard📚\n"
+            f"{modes}\n\n" f"# Answer:\n" f"{str_to_md(response)}"
+        )
 
     if response_num == 0:
         prompt = create_generation_prompt(response_num, prompt, False)
@@ -334,6 +340,7 @@ def text_to_text_generation(verbose, prompt):
     model_response = new[len(original):].strip()
     if verbose:
         return (
+            f"# 📚KubeWizard📚\n"
             f"{modes}\n\n"
             f"# Prompt given to the model:\n"
             f"{str_to_md(prompt)}\n"
@@ -343,7 +350,10 @@ def text_to_text_generation(verbose, prompt):
             f"```bash\n{str_to_md(response)}\n```\n"
         )
     else:
-        return f"{modes}\n\n" f"# Answer:\n" f"```bash\n{str_to_md(response)}\n```\n"
+        return (
+            f"# 📚KubeWizard📚\n"
+            f"{modes}\n\n" f"# Answer:\n" f"```bash\n{str_to_md(response)}\n```\n"
+        )
 
     res_prompt = create_generation_prompt(response_num, prompt, False)
     print(f'Prompt given to finetuned model:\n{res_prompt}\n')
@@ -362,6 +372,7 @@ def text_to_text_generation(verbose, prompt):
 
     if verbose:
         return (
+            f"# 📚KubeWizard📚\n"
             f"{modes}\n\n"
             f"# Answer with finetuned model\n"
             f"## Prompt given to the model:\n"
@@ -381,6 +392,7 @@ def text_to_text_generation(verbose, prompt):
         )
     else:
         return (
+            f"# 📚KubeWizard📚\n"
             f"{modes}\n\n"
             f"# Answer with finetuned model\n\n {str_to_md(res_normal)}\n"
             f"# Answer with RAG\n\n {str_to_md(res_semantic_search)}\n"