How do you load this with oobabooga?

spike4379

Just wondering how this is supposed to be loaded with oobabooga; no matter what I try, it errors out right after the model loads successfully.

bartowski (LM Studio Community org)

You may need to update to a more recent version; I know 2.2, with a more recent llama.cpp, was released yesterday.
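If you want to confirm what build the webui is actually running, here is a quick sanity check. This is just a sketch: it assumes you open the webui's own environment first (text-generation-webui ships a cmd_windows.bat that drops you into it), and that you're on the standard llama-cpp-python backend.

```python
# Sanity check: print the llama-cpp-python build ooba's env is using.
# Granite GGUFs need a build based on a recent llama.cpp, so an old
# version here would explain the failures.
import llama_cpp

print(llama_cpp.__version__)
```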

Thanks for the reply, bartowski.

I'd just like to add that all the Granite models give the same error in GGUF format.

I did a test running ooba 1.16 and the very latest one with a fresh install; both of them give roughly the same error. Here is the error from 1.16:
Traceback (most recent call last):
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\queueing.py", line 566, in process_events
response = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\route_utils.py", line 261, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\blocks.py", line 1786, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\blocks.py", line 1350, in call_function
prediction = await utils.async_iteration(iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 583, in async_iteration
return await iterator.__anext__()
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 576, in __anext__
return await anyio.to_thread.run_sync(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\anyio_backends_asyncio.py", line 2441, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\anyio_backends_asyncio.py", line 943, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 559, in run_sync_iterator_async
return next(iterator)
^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 742, in gen_wrapper
response = next(iterator)
^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\modules\chat.py", line 437, in generate_chat_reply_wrapper
yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu']), history
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\modules\html_generator.py", line 326, in chat_html_wrapper
return generate_cai_chat_html(history['visible'], name1, name2, style, character, reset_cache)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\modules\html_generator.py", line 250, in generate_cai_chat_html
row = [convert_to_markdown_wrapped(entry, use_cache=i != len(history) - 1) for entry in _row]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\modules\html_generator.py", line 250, in
row = [convert_to_markdown_wrapped(entry, use_cache=i != len(history) - 1) for entry in row]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\modules\html_generator.py", line 172, in convert_to_markdown_wrapped
return convert_to_markdown.__wrapped__(string)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\modules\html_generator.py", line 78, in convert_to_markdown
string = re.sub(pattern, replacement, string, flags=re.MULTILINE)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\ChatGPT\text-generation-webui-1.16\installer_files\env\Lib\re_init
.py", line 185, in sub
return _compile(pattern, flags).sub(repl, string, count)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: expected string or bytes-like object, got 'NoneType'
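For anyone skimming: the TypeError at the bottom just means ooba's markdown converter received None where the reply text should be, because generation itself produced nothing. A minimal reproduction of that exact failure mode (illustrative only; the pattern and variable names here are mine, not ooba's):

```python
import re

# ooba pipes the model's reply through re.sub while rendering chat HTML;
# when generation fails, the reply is None and re.sub rejects it.
reply = None
try:
    re.sub(r"\n+", "\n", reply, flags=re.MULTILINE)
except TypeError as e:
    print(e)  # expected string or bytes-like object, got 'NoneType'
```

So the traceback is a downstream symptom; the real failure happens earlier, in the generation step.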


And here is the error from today's latest ooba:

Traceback (most recent call last):
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\queueing.py", line 541, in process_events
response = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\route_utils.py", line 276, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\blocks.py", line 1928, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\blocks.py", line 1526, in call_function
prediction = await utils.async_iteration(iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\utils.py", line 657, in async_iteration
return await iterator.__anext__()
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\utils.py", line 650, in __anext__
return await anyio.to_thread.run_sync(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\anyio_backends_asyncio.py", line 2505, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\anyio_backends_asyncio.py", line 1005, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\utils.py", line 633, in run_sync_iterator_async
return next(iterator)
^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\gradio\utils.py", line 816, in gen_wrapper
response = next(iterator)
^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\modules\chat.py", line 443, in generate_chat_reply_wrapper
for i, history in enumerate(generate_chat_reply(text, state, regenerate, _continue, loading_message=True, for_ui=True)):
File "J:\ooba1\text-generation-webui-main\modules\chat.py", line 410, in generate_chat_reply
for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue, loading_message=loading_message, for_ui=for_ui):
File "J:\ooba1\text-generation-webui-main\modules\chat.py", line 305, in chatbot_wrapper
stopping_strings = get_stopping_strings(state)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\modules\chat.py", line 265, in get_stopping_strings
prefix_bot, suffix_bot = get_generation_prompt(renderer, impersonate=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\modules\chat.py", line 71, in get_generation_prompt
prompt = renderer(messages=messages)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\jinja2\environment.py", line 1295, in render
self.environment.handle_exception()
File "J:\ooba1\text-generation-webui-main\installer_files\env\Lib\site-packages\jinja2\environment.py", line 942, in handle_exception
raise rewrite_traceback_stack(source=source)
File "
