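# .SRCINFO for python-llama-cpp-cuda. Typically regenerated from the
# PKGBUILD via: makepkg --printsrcinfo > .SRCINFO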
pkgbase = python-llama-cpp-cuda
	pkgdesc = Python bindings for llama.cpp
	pkgver = 0.3.6
	pkgrel = 1
	url = https://github.com/abetlen/llama-cpp-python
	arch = any
	license = GPL-3.0-or-later
	checkdepends = python-pytest
	checkdepends = python-huggingface-hub
	checkdepends = python-scipy
	checkdepends = python-httpx
	checkdepends = python-fastapi
	checkdepends = python-sse-starlette
	checkdepends = python-pydantic-settings
	makedepends = python-scikit-build
	makedepends = python-installer
	makedepends = python-build
	makedepends = python-wheel
	makedepends = python-scikit-build-core
	makedepends = gcc13
	depends = python-typing_extensions
	depends = python-numpy
	depends = python-diskcache
	depends = cuda
	optdepends = uvicorn
	optdepends = python-fastapi
	optdepends = python-pydantic-settings
	optdepends = python-sse-starlette
	optdepends = python-pyaml
	provides = python-llama-cpp
	conflicts = python-llama-cpp
	source = https://files.pythonhosted.org/packages/source/l/llama-cpp-python/llama_cpp_python-0.3.6.tar.gz
	sha256sums = 86e35a8888274466958e24201b856cd71c8def0ea72e14312be13da96c15c7a4

pkgname = python-llama-cpp-cuda