pkgbase = python-llama-cpp-cuda
	pkgdesc = Python bindings for llama.cpp (with CUDA support)
pkgver = 0.3.8
pkgrel = 1
url = https://github.com/abetlen/llama-cpp-python
arch = x86_64
	license = MIT
checkdepends = python-pytest
checkdepends = python-huggingface-hub
checkdepends = python-scipy
checkdepends = python-httpx
checkdepends = python-fastapi
checkdepends = python-sse-starlette
checkdepends = python-pydantic-settings
makedepends = python-scikit-build
makedepends = python-installer
makedepends = python-build
makedepends = python-wheel
makedepends = python-scikit-build-core
makedepends = gcc13
depends = python-typing_extensions
depends = python-numpy
depends = python-diskcache
depends = cuda
depends = nvidia-utils
depends = python-transformers
depends = python-jinja
depends = python-huggingface-hub
depends = python-requests
depends = python-openai
optdepends = uvicorn
optdepends = python-fastapi
optdepends = python-pydantic-settings
optdepends = python-sse-starlette
optdepends = python-pyaml
provides = python-llama-cpp
conflicts = python-llama-cpp
source = https://files.pythonhosted.org/packages/source/l/llama-cpp-python/llama_cpp_python-0.3.8.tar.gz
sha256sums = 31c91323b555c025a76a30923cead9f5695da103dd68c15cdbb4509b17f0ed77
pkgname = python-llama-cpp-cuda