pkgbase = python-flash-attention
	pkgdesc = Fast and memory-efficient exact attention
	pkgver = 2.7.3
	pkgrel = 1
	url = https://github.com/HazyResearch/flash-attention
	arch = any
	license = Apache
	makedepends = ninja
	makedepends = python-build
	makedepends = python-installer
	makedepends = python-packaging
	makedepends = python-psutil
	makedepends = python-setuptools
	makedepends = python-wheel
	depends = python-einops
	depends = python-pytorch-cuda
	source = flash-attention-2.7.3.tar.gz::https://github.com/HazyResearch/flash-attention/archive/refs/tags/v2.7.3.tar.gz
	source = cutlass-4c42f73f.tar.gz::https://github.com/NVIDIA/cutlass/archive/4c42f73fdab5787e3bb57717f35a8cb1b3c0dc6d.tar.gz
	source = flash-attention.diff
	sha256sums = 21a7b82f787d2a33905c45ba10c3275d504c408b744520f7691d9501b7b4c009
	sha256sums = d9f1831aef8913fc281429c426ee46d992f69e4afea4c78a0d975f6ad649f994
	sha256sums = SKIP

pkgname = python-flash-attention
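
A minimal PKGBUILD sketch that would yield metadata matching the .SRCINFO above. The prepare(), build(), and package() bodies are assumptions inferred from the listed makedepends (python-build, python-installer); the maintainer's actual recipe, the contents of flash-attention.diff, and the exact cutlass wiring may differ.

# Hypothetical PKGBUILD corresponding to this .SRCINFO (a sketch, not the maintainer's file).
pkgname=python-flash-attention
pkgver=2.7.3
pkgrel=1
pkgdesc='Fast and memory-efficient exact attention'
arch=('any')
url='https://github.com/HazyResearch/flash-attention'
license=('Apache')
depends=('python-einops' 'python-pytorch-cuda')
makedepends=('ninja' 'python-build' 'python-installer' 'python-packaging'
             'python-psutil' 'python-setuptools' 'python-wheel')
_cutlass_commit=4c42f73fdab5787e3bb57717f35a8cb1b3c0dc6d
source=("flash-attention-$pkgver.tar.gz::$url/archive/refs/tags/v$pkgver.tar.gz"
        "cutlass-4c42f73f.tar.gz::https://github.com/NVIDIA/cutlass/archive/$_cutlass_commit.tar.gz"
        'flash-attention.diff')
sha256sums=('21a7b82f787d2a33905c45ba10c3275d504c408b744520f7691d9501b7b4c009'
            'd9f1831aef8913fc281429c426ee46d992f69e4afea4c78a0d975f6ad649f994'
            'SKIP')

prepare() {
  cd "flash-attention-$pkgver"
  # Assumed: point the bundled cutlass submodule path at the pinned checkout
  # and apply the local patch shipped alongside the PKGBUILD.
  rm -rf csrc/cutlass
  ln -s "$srcdir/cutlass-$_cutlass_commit" csrc/cutlass
  patch -Np1 -i "$srcdir/flash-attention.diff"
}

build() {
  cd "flash-attention-$pkgver"
  # Build a wheel against the system toolchain (no PEP 517 build isolation).
  python -m build --wheel --no-isolation
}

package() {
  cd "flash-attention-$pkgver"
  # Install the wheel into the package staging directory.
  python -m installer --destdir="$pkgdir" dist/*.whl
}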