arabellastrange committed on
Commit
843e3ae
·
1 Parent(s): 3152165

removed unneeded code

Browse files
Files changed (1) hide show
  1. app.py +32 -34
app.py CHANGED
@@ -33,23 +33,23 @@ def google_search_chat(message, history):
33
 
34
  response = generate_chat_response_with_history_rag_return_response(index, message, history)
35
 
36
- similar_str = "not calculated"
37
- faithfulness_str = "not calculated"
38
-
39
- if rag_similarity:
40
- sim_evaluator = SemanticSimilarityEvaluator()
41
- faith_evaluator = FaithfulnessEvaluator(llm=get_llm())
42
- # condensed_context = condense_context(relevant_content)
43
- # logger.info("Calculating similarity...")
44
- # similar = sim_evaluator.evaluate(response=str(response),
45
- # reference=condensed_context)
46
- logger.info("Calculating faithfulness...")
47
- faithfulness = faith_evaluator.evaluate_response(query=condensed_question, response=response)
48
- # similar_str = str(round((similar.score * 100), 2)) + "%"
49
- faithfulness_str = "Yes" if faithfulness.passing else "No"
50
-
51
- logger.info(f'**Search Query:** {condensed_question} \n **Faithfulness:** {faithfulness_str} \n '
52
- f'**Similarity:** {similar_str} \n **Sources used:** \n {sources}')
53
 
54
  response_text = []
55
  string_output = ""
@@ -59,21 +59,21 @@ def google_search_chat(message, history):
59
  string_output = ''.join(response_text)
60
  yield string_output
61
 
62
- if not sourced:
63
- pass
64
- if sourced and not query and not rag_similarity:
65
- yield string_output + f'\n\n --- \n **Sources used:** \n {sources}'
66
- if sourced and query and not rag_similarity:
67
- yield (string_output
68
- + f'\n\n --- \n **Search Query:** {condensed_question} '
69
- f'\n **Sources used:** \n {sources}')
70
- if rag_similarity:
71
- yield (string_output
72
- + f'\n\n --- \n **Search Query:** {condensed_question} \n '
73
- # f'**Similarity of response to the sources [ℹ️]'
74
- # f'(https://en.wikipedia.org/wiki/Semantic_similarity):** {similar_str} \n'
75
- f'**Is response in source documents?**: {faithfulness_str}'
76
- f'\n **Sources used:** \n {sources}')
77
 
78
  logger.info(f'Assistant Response: {string_output}')
79
  else:
@@ -99,8 +99,6 @@ if __name__ == '__main__':
99
  logger.setLevel(logging.INFO)
100
 
101
  api_key = read_file(API_KEY_PATH)
102
- global sourced
103
- sourced = False
104
 
105
  # GPT - 4 Turbo. The latest GPT - 4 model intended to reduce cases of “laziness” where the model doesn’t complete
106
  # a task. Returns a maximum of 4,096 output tokens. Link:
 
33
 
34
  response = generate_chat_response_with_history_rag_return_response(index, message, history)
35
 
36
+ # similar_str = "not calculated"
37
+ # faithfulness_str = "not calculated"
38
+ #
39
+ # if rag_similarity:
40
+ # sim_evaluator = SemanticSimilarityEvaluator()
41
+ # faith_evaluator = FaithfulnessEvaluator(llm=get_llm())
42
+ # # condensed_context = condense_context(relevant_content)
43
+ # # logger.info("Calculating similarity...")
44
+ # # similar = sim_evaluator.evaluate(response=str(response),
45
+ # # reference=condensed_context)
46
+ # logger.info("Calculating faithfulness...")
47
+ # faithfulness = faith_evaluator.evaluate_response(query=condensed_question, response=response)
48
+ # # similar_str = str(round((similar.score * 100), 2)) + "%"
49
+ # faithfulness_str = "Yes" if faithfulness.passing else "No"
50
+ #
51
+ # logger.info(f'**Search Query:** {condensed_question} \n **Faithfulness:** {faithfulness_str} \n '
52
+ # f'**Similarity:** {similar_str} \n **Sources used:** \n {sources}')
53
 
54
  response_text = []
55
  string_output = ""
 
59
  string_output = ''.join(response_text)
60
  yield string_output
61
 
62
+ # if not sourced:
63
+ # pass
64
+ # if sourced and not query and not rag_similarity:
65
+ # yield string_output + f'\n\n --- \n **Sources used:** \n {sources}'
66
+ # if sourced and query and not rag_similarity:
67
+ # yield (string_output
68
+ # + f'\n\n --- \n **Search Query:** {condensed_question} '
69
+ # f'\n **Sources used:** \n {sources}')
70
+ # if rag_similarity:
71
+ # yield (string_output
72
+ # + f'\n\n --- \n **Search Query:** {condensed_question} \n '
73
+ # # f'**Similarity of response to the sources [ℹ️]'
74
+ # # f'(https://en.wikipedia.org/wiki/Semantic_similarity):** {similar_str} \n'
75
+ # f'**Is response in source documents?**: {faithfulness_str}'
76
+ # f'\n **Sources used:** \n {sources}')
77
 
78
  logger.info(f'Assistant Response: {string_output}')
79
  else:
 
99
  logger.setLevel(logging.INFO)
100
 
101
  api_key = read_file(API_KEY_PATH)
 
 
102
 
103
  # GPT - 4 Turbo. The latest GPT - 4 model intended to reduce cases of “laziness” where the model doesn’t complete
104
  # a task. Returns a maximum of 4,096 output tokens. Link: