|  |  |  |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-12 22:08:09 +1200 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-12 22:08:09 +1200 |
| commit | 187d0801404f415f22c0b31531982c7ea97fa341 (patch) | |
| tree | 6df127ce340741832b653fba9a0bd6c948a79acd /lib/crypto/blake2b.c | |
| parent | 35ebee7e720944a66befb5899c72ce1e01dfa44e (diff) | |
| parent | f6a458746f905adb7d70e50e8b9383dc9e3fd75f (diff) | |
Merge tag 'libcrypto-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux
Pull crypto library fixes from Eric Biggers:
"Fixes for some recent regressions as well as some longstanding issues:
- Fix incorrect output from the arm64 NEON implementation of GHASH
- Merge the ksimd scopes in the arm64 XTS code to reduce stack usage
- Roll up the BLAKE2b round loop on 32-bit kernels to greatly reduce
code size and stack usage
- Add missing RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS dependency
- Fix chacha-riscv64-zvkb.S to not use frame pointer for data"
* tag 'libcrypto-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux:
crypto: arm64/ghash - Fix incorrect output from ghash-neon
crypto/arm64: sm4/xts - Merge ksimd scopes to reduce stack bloat
crypto/arm64: aes/xts - Use single ksimd scope to reduce stack bloat
lib/crypto: blake2s: Replace manual unrolling with unrolled_full
lib/crypto: blake2b: Roll up BLAKE2b round loop on 32-bit
lib/crypto: riscv: Depend on RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
lib/crypto: riscv/chacha: Avoid s0/fp register
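
The two BLAKE2 items above both concern the compression-function round loop. For reference, each of the twelve BLAKE2b rounds applies the G mixing step eight times: four times down the columns and four times across the diagonals of the 4x4 state of 64-bit words (RFC 7693). The sketch below is a minimal standalone illustration of that structure; the names blake2b_g and blake2b_round are illustrative, and the per-round message words are passed in already permuted rather than looked up through blake2b_sigma as the kernel code does.

```c
#include <stdint.h>

static inline uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

/* One G mixing step on four 64-bit state words (RFC 7693). */
static void blake2b_g(uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d,
		      uint64_t x, uint64_t y)
{
	*a += *b + x;
	*d = ror64(*d ^ *a, 32);
	*c += *d;
	*b = ror64(*b ^ *c, 24);
	*a += *b + y;
	*d = ror64(*d ^ *a, 16);
	*c += *d;
	*b = ror64(*b ^ *c, 63);
}

/*
 * One round over the 16-word working state v[].  ms[] holds the 16
 * message words already permuted for this round; the kernel code instead
 * indexes the message block through blake2b_sigma[r].
 */
static void blake2b_round(uint64_t v[16], const uint64_t ms[16])
{
	blake2b_g(&v[0], &v[4], &v[8],  &v[12], ms[0],  ms[1]);  /* columns */
	blake2b_g(&v[1], &v[5], &v[9],  &v[13], ms[2],  ms[3]);
	blake2b_g(&v[2], &v[6], &v[10], &v[14], ms[4],  ms[5]);
	blake2b_g(&v[3], &v[7], &v[11], &v[15], ms[6],  ms[7]);
	blake2b_g(&v[0], &v[5], &v[10], &v[15], ms[8],  ms[9]);  /* diagonals */
	blake2b_g(&v[1], &v[6], &v[11], &v[12], ms[10], ms[11]);
	blake2b_g(&v[2], &v[7], &v[8],  &v[13], ms[12], ms[13]);
	blake2b_g(&v[3], &v[4], &v[9],  &v[14], ms[14], ms[15]);
}
```

The compression function runs this round twelve times; the patch below changes only whether those twelve iterations are emitted as an explicit loop or fully unrolled.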
Diffstat (limited to 'lib/crypto/blake2b.c')
|  |  |  |
|---|---|---|
| -rw-r--r-- | lib/crypto/blake2b.c | 44 |

1 file changed, 20 insertions, 24 deletions
diff --git a/lib/crypto/blake2b.c b/lib/crypto/blake2b.c
index 09c6d65d8a6e63..581b7f8486fae8 100644
--- a/lib/crypto/blake2b.c
+++ b/lib/crypto/blake2b.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/unroll.h>
 #include <linux/types.h>
 
 static const u8 blake2b_sigma[12][16] = {
@@ -73,31 +74,26 @@ blake2b_compress_generic(struct blake2b_ctx *ctx,
         b = ror64(b ^ c, 63); \
 } while (0)
 
-#define ROUND(r) do { \
-        G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
-        G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
-        G(r, 2, v[2], v[ 6], v[10], v[14]); \
-        G(r, 3, v[3], v[ 7], v[11], v[15]); \
-        G(r, 4, v[0], v[ 5], v[10], v[15]); \
-        G(r, 5, v[1], v[ 6], v[11], v[12]); \
-        G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
-        G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
-        ROUND(0);
-        ROUND(1);
-        ROUND(2);
-        ROUND(3);
-        ROUND(4);
-        ROUND(5);
-        ROUND(6);
-        ROUND(7);
-        ROUND(8);
-        ROUND(9);
-        ROUND(10);
-        ROUND(11);
-
+#ifdef CONFIG_64BIT
+        /*
+         * Unroll the rounds loop to enable constant-folding of the
+         * blake2b_sigma values. Seems worthwhile on 64-bit kernels.
+         * Not worthwhile on 32-bit kernels because the code size is
+         * already so large there due to BLAKE2b using 64-bit words.
+         */
+        unrolled_full
+#endif
+        for (int r = 0; r < 12; r++) {
+                G(r, 0, v[0], v[4], v[8], v[12]);
+                G(r, 1, v[1], v[5], v[9], v[13]);
+                G(r, 2, v[2], v[6], v[10], v[14]);
+                G(r, 3, v[3], v[7], v[11], v[15]);
+                G(r, 4, v[0], v[5], v[10], v[15]);
+                G(r, 5, v[1], v[6], v[11], v[12]);
+                G(r, 6, v[2], v[7], v[8], v[13]);
+                G(r, 7, v[3], v[4], v[9], v[14]);
+        }
 #undef G
-#undef ROUND
 
         for (i = 0; i < 8; ++i)
                 ctx->h[i] ^= v[i] ^ v[i + 8];
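
The new code keeps the round loop rolled on 32-bit kernels and asks the compiler to fully unroll it on 64-bit kernels via the unrolled_full annotation from <linux/unroll.h>. The sketch below is a standalone illustration of the same conditional-unrolling idea; the GCC unroll pragma, the UINTPTR_MAX word-size test, and the dummy mix_round() are stand-ins chosen for the example and are not what the kernel macro expands to.

```c
/*
 * Standalone illustration of conditionally unrolling a fixed-count loop.
 * The word-size check stands in for CONFIG_64BIT, and mix_round() is a
 * dummy workload standing in for one BLAKE2b round.
 */
#include <stdint.h>
#include <stdio.h>

#define N_ROUNDS 12

static uint64_t mix_round(uint64_t acc, int r)
{
	/* Dummy per-round work, just enough to keep the loop observable. */
	return (acc ^ (uint64_t)r) * 0x9e3779b97f4a7c15ULL;
}

static uint64_t run_rounds(uint64_t acc)
{
#if UINTPTR_MAX > 0xffffffffu	/* rough stand-in for CONFIG_64BIT */
#pragma GCC unroll 12		/* fully unroll only on 64-bit targets */
#endif
	for (int r = 0; r < N_ROUNDS; r++)
		acc = mix_round(acc, r);
	return acc;
}

int main(void)
{
	printf("%llx\n", (unsigned long long)run_rounds(1));
	return 0;
}
```

Whether unrolling pays off is a trade-off between code size and the constant folding it enables; the CONFIG_64BIT guard in the patch encodes exactly that distinction.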
