Cannot load.
Traceback (most recent call last):
File "C:\text-generation-webui-main\text-generation-webui-main\modules\ui_model_menu.py", line 245, in load_model_wrapper
shared.model, shared.tokenizer = load_model(selected_model, loader)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\text-generation-webui-main\text-generation-webui-main\modules\models.py", line 87, in load_model
output = load_func_map[loader](model_name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\text-generation-webui-main\text-generation-webui-main\modules\models.py", line 261, in llamacpp_loader
model, tokenizer = LlamaCppModel.from_pretrained(model_file)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\text-generation-webui-main\text-generation-webui-main\modules\llamacpp_model.py", line 102, in from_pretrained
result.model = Llama(**params)
^^^^^^^^^^^^^^^
File "C:\text-generation-webui-main\text-generation-webui-main\installer_files\env\Lib\site-packages\llama_cpp_cuda\llama.py", line 311, in init
self._model = _LlamaModel(
^^^^^^^^^^^^
File "C:\text-generation-webui-main\text-generation-webui-main\installer_files\env\Lib\site-packages\llama_cpp_cuda_internals.py", line 55, in init
raise ValueError(f"Failed to load model from file: {path_model}")
ValueError: Failed to load model from file: models\command-r-plus-Q5_K_M.gguf
The underlying cause is that the version of llama-cpp-python used by oobabooga/text-generation-webui does not yet support this model. You can try building it yourself or wait for an update.