vertyco/gpt-api

Docker error

Closed this issue · 3 comments

I set all the values in the env file and provided its file path in the docker-compose file,
but when I tried to run docker-compose up:

gpt-api  |   File "<frozen runpy>", line 198, in _run_module_as_main
gpt-api  |   File "<frozen runpy>", line 88, in _run_code
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/uvicorn/__main__.py", line 4, in <module>
gpt-api  |     uvicorn.main()
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/click/core.py", line 1130, in __call__
gpt-api  |     return self.main(*args, **kwargs)
gpt-api  |            ^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/click/core.py", line 1055, in main
gpt-api  |     rv = self.invoke(ctx)
gpt-api  |          ^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/click/core.py", line 1404, in invoke
gpt-api  |     return ctx.invoke(self.callback, **ctx.params)
gpt-api  |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/click/core.py", line 760, in invoke
gpt-api  |     return __callback(*args, **kwargs)
gpt-api  |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/uvicorn/main.py", line 410, in main
gpt-api  |     run(
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/uvicorn/main.py", line 578, in run
gpt-api  |     server.run()
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/uvicorn/server.py", line 61, in run
gpt-api  |     return asyncio.run(self.serve(sockets=sockets))
gpt-api  |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/asyncio/runners.py", line 190, in run
gpt-api  |     return runner.run(main)
gpt-api  |            ^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/asyncio/runners.py", line 118, in run
gpt-api  |     return self._loop.run_until_complete(task)
gpt-api  |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/asyncio/base_events.py", line 653, in run_until_complete
gpt-api  |     return future.result()
gpt-api  |            ^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/uvicorn/server.py", line 68, in serve
gpt-api  |     config.load()
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/uvicorn/config.py", line 473, in load
gpt-api  |     self.loaded_app = import_from_string(self.app)
gpt-api  |                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/uvicorn/importer.py", line 21, in import_from_string
gpt-api  |     module = importlib.import_module(module_str)
gpt-api  |              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/importlib/__init__.py", line 126, in import_module
gpt-api  |     return _bootstrap._gcd_import(name[level:], package, level)
gpt-api  |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "<frozen importlib._bootstrap>", line 1204, in _gcd_import
gpt-api  |   File "<frozen importlib._bootstrap>", line 1176, in _find_and_load
gpt-api  |   File "<frozen importlib._bootstrap>", line 1147, in _find_and_load_unlocked
gpt-api  |   File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
gpt-api  |   File "<frozen importlib._bootstrap_external>", line 940, in exec_module
gpt-api  |   File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
gpt-api  |   File "/app/src/api.py", line 9, in <module>
gpt-api  |     from gpt4all import GPT4All
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/gpt4all/__init__.py", line 1, in <module>
gpt-api  |     from . import gpt4all # noqa
gpt-api  |     ^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/gpt4all/gpt4all.py", line 6, in <module>
gpt-api  |     from . import pyllmodel
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/gpt4all/pyllmodel.py", line 39, in <module>
gpt-api  |     llmodel, llama = load_llmodel_library()
gpt-api  |                      ^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/site-packages/gpt4all/pyllmodel.py", line 32, in load_llmodel_library
gpt-api  |     llama_lib = ctypes.CDLL(llama_dir, mode=ctypes.RTLD_GLOBAL)
gpt-api  |                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  |   File "/usr/local/lib/python3.11/ctypes/__init__.py", line 376, in __init__
gpt-api  |     self._handle = _dlopen(self._name, mode)
gpt-api  |                    ^^^^^^^^^^^^^^^^^^^^^^^^^
gpt-api  | OSError: /usr/local/lib/python3.11/site-packages/gpt4all/llmodel_DO_NOT_MODIFY/build/libllama.so: cannot open shared object file: No such file or directory

The Docker setup isn't quite finished yet; I've been neglecting it since I host using systemd instead.

Please try running this again — it should be resolved. If not, please share your docker-compose file.

Closing this due to no response.