pkgbase = python-flash-attn
	pkgdesc = Fast and memory-efficient exact attention
	pkgver = 2.7.4.post1
	pkgrel = 1
	url = https://github.com/Dao-AILab/flash-attention
	arch = x86_64
	license = BSD-3-Clause
	makedepends = git
	makedepends = ninja
	makedepends = python-build
	makedepends = python-psutil
	makedepends = python-installer
	makedepends = python-setuptools
	makedepends = python-wheel
	depends = python-pytorch-opt-cuda
	depends = python-einops
	source = flash-attention::git+https://github.com/Dao-AILab/flash-attention.git#tag=v2.7.4.post1
	source = 0001-fix-building-torch-extension-with-glog.patch
	sha256sums = 341db34ccbfc6290649ac881aba41e10f3f15341d26bf1e0d543cbda1f582bbf
	sha256sums = aee8efe41561aca9ca25f0495720998e258119df1f6d59fa60be684e81793c7c

pkgname = python-flash-attn