tamas.kiss committed
Commit • 5a1d695
Parent(s): 859a84d
Update logs
app.py CHANGED
@@ -268,10 +268,15 @@ def text_to_text_generation(verbose, prompt):
     match response_num:
         case 0:
             prompt = f"[INST] {prompt}\n Lets think step by step. [/INST] {start_template}"
-
+            print(prompt)
         case 1:
             if retriever == "semantic_search":
                 question = prompt
+                print(
+                    (
+                        f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: [RETRIEVED_RESULTS_FROM_BOOK] [INST] Answer the following question: {question} [/INST]\nAnswer: \n")
+
+                )
                 retrieved_results = semantic_search(prompt)
                 prompt = f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: {retrieved_results} </s>\n<s> [INST] Answer the following question: {prompt} [/INST]\nAnswer: "
 
@@ -288,6 +293,12 @@ def text_to_text_generation(verbose, prompt):
                 question = prompt
                 prompt = f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: {retrieved_results} </s>\n<s> [INST] Answer the following question: {prompt} [/INST]\nAnswer: "
 
+                print(
+                    (
+                        f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: [RETRIEVED_RESULTS_FROM_GOOGLE] [INST] Answer the following question: {question} [/INST]\nAnswer: \n"
+                    )
+                )
+
                 md = (
                     f"### Step 1: Preparing prompt for additional documentation\n"
                     f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation:\n"
@@ -298,9 +309,11 @@ def text_to_text_generation(verbose, prompt):
                 )
             else:
                 prompt = f"[INST] Answer the following question: {prompt} [/INST]\nAnswer: "
+                print(prompt)
 
         case _:
             prompt = f"[INST] {prompt} [/INST]"
+            print(prompt)
 
     return prompt, md
 
@@ -341,13 +354,12 @@ def text_to_text_generation(verbose, prompt):
 
     if response_num == 2:
         prompt, md = create_generation_prompt(response_num, prompt, False)
-        print('Prompt given to model:\n' + prompt + '\n')
         original, new = generate_batch(prompt)[0]
         prompt, response = cleanup(original, new)
         if verbose:
             return (
                 f"# 📚KubeWizard📚\n"
-                f"A helpful Kubernetes Assistant powered by Component Soft\n"
+                f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
                 f"--------------------------------------------\n"
                 f"# Classified your prompt as:\n"
                 f"{modes}\n\n"
@@ -358,7 +370,7 @@ def text_to_text_generation(verbose, prompt):
         else:
             return (
                 f"# 📚KubeWizard📚\n"
-                f"A helpful Kubernetes Assistant powered by Component Soft\n"
+                f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
                 f"--------------------------------------------\n"
                 f"# Classified your prompt as:\n"
                 f"{modes}\n\n"
@@ -367,14 +379,13 @@ def text_to_text_generation(verbose, prompt):
 
     if response_num == 0:
         prompt, md = create_generation_prompt(response_num, prompt, False)
-        print('Prompt given to model:\n' + prompt + '\n')
         original, new = generate_batch(prompt)[0]
         prompt, response = cleanup(original, new)
         model_response = new[len(original):].strip()
         if verbose:
             return (
                 f"# 📚KubeWizard📚\n"
-                f"A helpful Kubernetes Assistant powered by Component Soft\n"
+                f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
                 f"--------------------------------------------\n"
                 f"# Classified your prompt as:\n"
                 f"{modes}\n\n"
@@ -388,7 +399,7 @@ def text_to_text_generation(verbose, prompt):
         else:
             return (
                 f"# 📚KubeWizard📚\n"
-                f"A helpful Kubernetes Assistant powered by Component Soft\n"
+                f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
                 f"--------------------------------------------\n"
                 f"# Classified your prompt as:\n"
                 f"{modes}\n\n"
@@ -396,11 +407,8 @@ def text_to_text_generation(verbose, prompt):
     )
 
     res_prompt, res_md = create_generation_prompt(response_num, prompt, False)
-    print(f'Prompt given to finetuned model:\n{res_prompt}\n')
     res_semantic_search_prompt, res_semantic_search_md = create_generation_prompt(response_num, prompt, "semantic_search")
-    print(f'Prompt given to model with RAG:\n{res_semantic_search_prompt}\n')
     res_google_search_prompt, res_google_search_md = create_generation_prompt(response_num, prompt, "google_search")
-    print(f'Prompt given to model with Google search:\n{res_google_search_prompt}\n')
 
     gen_normal, gen_semantic_search, gen_google_search = generate_batch(
         res_prompt, res_semantic_search_prompt, res_google_search_prompt
@@ -413,7 +421,7 @@ def text_to_text_generation(verbose, prompt):
     if verbose:
         return (
             f"# 📚KubeWizard📚\n"
-            f"A helpful Kubernetes Assistant powered by Component Soft\n"
+            f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
             f"--------------------------------------------\n"
             f"# Classified your prompt as:\n"
             f"{modes}\n\n"
@@ -439,7 +447,7 @@ def text_to_text_generation(verbose, prompt):
     else:
         return (
             f"# 📚KubeWizard📚\n"
-            f"A helpful Kubernetes Assistant powered by Component Soft\n"
+            f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
            f"--------------------------------------------\n"
            f"# Classified your prompt as:\n"
            f"{modes}\n\n"