Commit ceca114 · 1 Parent(s): fdb7c69

update

src/backend/run_eval_suite.py CHANGED
@@ -1,4 +1,4 @@
-from lm_eval import 
+from lm_eval import evaluator
 from lm_eval.tasks import TaskManager
 
 from src.backend.manage_requests import EvalRequest
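
This change completes an import that was left dangling, making the `evaluator` module available to the backend. A minimal sketch of how these two imports are typically combined in lm-eval-harness v0.4+ follows; the model, task name, and `simple_evaluate` arguments are illustrative assumptions, not code from this Space:

# Usage sketch (assumptions: lm-eval-harness v0.4+ API; "gpt2" and
# "hellaswag" are placeholder model/task choices, not from this repo).
from lm_eval import evaluator
from lm_eval.tasks import TaskManager

task_manager = TaskManager()  # indexes the task configs shipped with lm-eval

results = evaluator.simple_evaluate(
    model="hf",                    # Hugging Face transformers backend
    model_args="pretrained=gpt2",  # placeholder checkpoint
    tasks=["hellaswag"],           # placeholder task list
    num_fewshot=0,
    task_manager=task_manager,
)
print(results["results"])          # per-task metrics keyed by task name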
    	
src/leaderboard/read_evals.py CHANGED
@@ -11,6 +11,8 @@ from src.display.formatting import make_clickable_model
 from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
 from src.submission.check_validity import is_model_on_hub
 
+from typing import Optional
+
 
 def is_float(string):
     try:
@@ -215,7 +217,10 @@ def get_request_file_for_model_open_llm(requests_path, model_name, precision):
     return request_file
 
 
-def get_raw_eval_results(results_path: str, 
+def get_raw_eval_results(results_path: str,
+                         requests_path: str,
+                         requests_path_open_llm: Optional[str] = None,
+                         is_backend: bool = False) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
     model_result_filepaths = []
 
@@ -238,7 +243,7 @@ def get_raw_eval_results(results_path: str, requests_path: str, requests_path_op
         # Creation of result
         eval_result = EvalResult.init_from_json_file(model_result_filepath, is_backend=is_backend)
         eval_result.update_with_request_file(requests_path)
-        if requests_path_open_llm:
+        if requests_path_open_llm is not None:
             eval_result.update_model_type_with_open_llm_request_file(requests_path_open_llm)
         # Store results of same eval together
         eval_name = eval_result.eval_name
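
The new signature gives `requests_path_open_llm` an explicit `Optional[str] = None` default, and the guard switches from a truthiness test to `is not None`. The difference matters for empty strings: under the old check, `""` would silently skip the Open LLM update, while under the new one it is passed through. A small self-contained sketch of the two guards; the helper names are hypothetical, chosen only to isolate the comparison:

from typing import Optional

# Hypothetical helpers, not from read_evals.py: they isolate the guard change.
def update_truthy(path: Optional[str] = None) -> str:
    if path:                    # old guard: both None and "" skip the update
        return f"updated from {path!r}"
    return "skipped"

def update_explicit(path: Optional[str] = None) -> str:
    if path is not None:        # new guard: only None skips; "" falls through
        return f"updated from {path!r}"
    return "skipped"

print(update_truthy(""))        # skipped
print(update_explicit(""))      # updated from ''
print(update_explicit())        # skipped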