2024-07-03 02:55:23 | INFO | model_worker | args: Namespace(awq_ckpt=None, awq_groupsize=-1, awq_wbits=16, controller_address='http://127.0.0.1:21002', conv_template=None, cpu_offloading=False, debug=False, device='cuda', dtype=None, embed_in_truncate=False, enable_exllama=False, enable_xft=False, exllama_cache_8bit=False, exllama_gpu_split=None, exllama_max_seq_len=4096, gptq_act_order=False, gptq_ckpt=None, gptq_groupsize=-1, gptq_wbits=16, gpus=None, host='127.0.0.1', limit_worker_concurrency=5, load_8bit=False, max_gpu_memory=None, model_names=['vicuna-7b-v1.1'], model_path='/path/to/vicuna/weights', no_register=False, num_gpus=1, port=21003, revision='main', seed=None, ssl=False, stream_interval=2, worker_address='http://127.0.0.1:21003', xft_dtype=None, xft_max_seq_len=4096)
2024-07-03 02:55:23 | INFO | model_worker | Loading the model ['vicuna-7b-v1.1'] on worker 2476a693 ...
2024-07-03 02:55:23 | ERROR | stderr | Traceback (most recent call last):
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/local/lib/python3.8/dist-packages/transformers/utils/hub.py", line 402, in cached_file
2024-07-03 02:55:23 | ERROR | stderr |     resolved_file = hf_hub_download(
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/local/lib/python3.8/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
2024-07-03 02:55:23 | ERROR | stderr |     validate_repo_id(arg_value)
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/local/lib/python3.8/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
2024-07-03 02:55:23 | ERROR | stderr |     raise HFValidationError(
2024-07-03 02:55:23 | ERROR | stderr | huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/path/to/vicuna/weights'. Use `repo_type` argument if needed.
2024-07-03 02:55:23 | ERROR | stderr | 
2024-07-03 02:55:23 | ERROR | stderr | The above exception was the direct cause of the following exception:
2024-07-03 02:55:23 | ERROR | stderr | 
2024-07-03 02:55:23 | ERROR | stderr | Traceback (most recent call last):
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
2024-07-03 02:55:23 | ERROR | stderr |     return _run_code(code, main_globals, None,
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
2024-07-03 02:55:23 | ERROR | stderr |     exec(code, run_globals)
2024-07-03 02:55:23 | ERROR | stderr |   File "/LLM_32T/evelyn/FastChat/fastchat/serve/model_worker.py", line 414, in <module>
2024-07-03 02:55:23 | ERROR | stderr |     args, worker = create_model_worker()
2024-07-03 02:55:23 | ERROR | stderr |   File "/LLM_32T/evelyn/FastChat/fastchat/serve/model_worker.py", line 385, in create_model_worker
2024-07-03 02:55:23 | ERROR | stderr |     worker = ModelWorker(
2024-07-03 02:55:23 | ERROR | stderr |   File "/LLM_32T/evelyn/FastChat/fastchat/serve/model_worker.py", line 77, in __init__
2024-07-03 02:55:23 | ERROR | stderr |     self.model, self.tokenizer = load_model(
2024-07-03 02:55:23 | ERROR | stderr |   File "/LLM_32T/evelyn/FastChat/fastchat/model/model_adapter.py", line 367, in load_model
2024-07-03 02:55:23 | ERROR | stderr |     model, tokenizer = adapter.load_model(model_path, kwargs)
2024-07-03 02:55:23 | ERROR | stderr |   File "/LLM_32T/evelyn/FastChat/fastchat/model/model_adapter.py", line 706, in load_model
2024-07-03 02:55:23 | ERROR | stderr |     tokenizer = AutoTokenizer.from_pretrained(
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/local/lib/python3.8/dist-packages/transformers/models/auto/tokenization_auto.py", line 826, in from_pretrained
2024-07-03 02:55:23 | ERROR | stderr |     tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/local/lib/python3.8/dist-packages/transformers/models/auto/tokenization_auto.py", line 658, in get_tokenizer_config
2024-07-03 02:55:23 | ERROR | stderr |     resolved_config_file = cached_file(
2024-07-03 02:55:23 | ERROR | stderr |   File "/usr/local/lib/python3.8/dist-packages/transformers/utils/hub.py", line 466, in cached_file
2024-07-03 02:55:23 | ERROR | stderr |     raise EnvironmentError(
2024-07-03 02:55:23 | ERROR | stderr | OSError: Incorrect path_or_model_id: '/path/to/vicuna/weights'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
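
The traceback shows the root cause: the worker was launched with the documentation placeholder '/path/to/vicuna/weights' as --model-path, so huggingface_hub rejects it as a Hub repo id and transformers cannot resolve it as a local folder either. A minimal sketch of a corrected launch, assuming the converted Vicuna weights live in a local directory (the directory below is hypothetical; a valid Hub repo id would also work). All flags correspond to the fields in the Namespace dump above:

    # --model-path must be a real local folder (containing config.json and the
    # tokenizer files) or a Hub repo id; /data/models/vicuna-7b-v1.1 is a
    # hypothetical example path.
    python3 -m fastchat.serve.model_worker \
        --model-path /data/models/vicuna-7b-v1.1 \
        --model-names vicuna-7b-v1.1 \
        --host 127.0.0.1 --port 21003 \
        --controller-address http://127.0.0.1:21002 \
        --worker-address http://127.0.0.1:21003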