Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

lib/crypto: x86/polyval: Migrate optimized code into library

Migrate the x86_64 implementation of POLYVAL into lib/crypto/, wiring it
up to the POLYVAL library interface. This makes the POLYVAL library
properly optimized on x86_64.

This drops the x86_64 optimizations of polyval in the crypto_shash API.
That's fine, since polyval will be removed from crypto_shash entirely,
as it is unneeded there. But even if it comes back, the crypto_shash
API could just be implemented on top of the library API, as usual.

Adjust the names and prototypes of the assembly functions to align more
closely with the rest of the library code.

Also replace a movaps instruction with movups to remove the assumption
that the key struct is 16-byte aligned. Users can still align the key
if they want (and at least in this case, movups is just as fast as
movaps), but it's inconvenient to require it.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20251109234726.638437-6-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>

+107 -214
-10
arch/x86/crypto/Kconfig
··· 353 353 Architecture: x86_64 using: 354 354 - AVX2 (Advanced Vector Extensions 2) 355 355 356 - config CRYPTO_POLYVAL_CLMUL_NI 357 - tristate "Hash functions: POLYVAL (CLMUL-NI)" 358 - depends on 64BIT 359 - select CRYPTO_POLYVAL 360 - help 361 - POLYVAL hash function for HCTR2 362 - 363 - Architecture: x86_64 using: 364 - - CLMUL-NI (carry-less multiplication new instructions) 365 - 366 356 config CRYPTO_SM3_AVX_X86_64 367 357 tristate "Hash functions: SM3 (AVX)" 368 358 depends on 64BIT
-3
arch/x86/crypto/Makefile
··· 52 52 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o 53 53 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o 54 54 55 - obj-$(CONFIG_CRYPTO_POLYVAL_CLMUL_NI) += polyval-clmulni.o 56 - polyval-clmulni-y := polyval-clmulni_asm.o polyval-clmulni_glue.o 57 - 58 55 obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o 59 56 nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o 60 57 obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
+19 -21
arch/x86/crypto/polyval-clmulni_asm.S lib/crypto/x86/polyval-pclmul-avx.S
··· 36 36 #define MI %xmm14 37 37 #define SUM %xmm15 38 38 39 - #define KEY_POWERS %rdi 40 - #define MSG %rsi 41 - #define BLOCKS_LEFT %rdx 42 - #define ACCUMULATOR %rcx 39 + #define ACCUMULATOR %rdi 40 + #define KEY_POWERS %rsi 41 + #define MSG %rdx 42 + #define BLOCKS_LEFT %rcx 43 43 #define TMP %rax 44 44 45 45 .section .rodata.cst16.gstar, "aM", @progbits, 16 ··· 234 234 235 235 movups (MSG), %xmm0 236 236 pxor SUM, %xmm0 237 - movaps (KEY_POWERS), %xmm1 237 + movups (KEY_POWERS), %xmm1 238 238 schoolbook1_noload 239 239 dec BLOCKS_LEFT 240 240 addq $16, MSG ··· 261 261 .endm 262 262 263 263 /* 264 - * Perform montgomery multiplication in GF(2^128) and store result in op1. 264 + * Computes a = a * b * x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1. 265 265 * 266 - * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 267 - * If op1, op2 are in montgomery form, this computes the montgomery 268 - * form of op1*op2. 269 - * 270 - * void clmul_polyval_mul(u8 *op1, const u8 *op2); 266 + * void polyval_mul_pclmul_avx(struct polyval_elem *a, 267 + * const struct polyval_elem *b); 271 268 */ 272 - SYM_FUNC_START(clmul_polyval_mul) 269 + SYM_FUNC_START(polyval_mul_pclmul_avx) 273 270 FRAME_BEGIN 274 271 vmovdqa .Lgstar(%rip), GSTAR 275 272 movups (%rdi), %xmm0 ··· 277 280 movups SUM, (%rdi) 278 281 FRAME_END 279 282 RET 280 - SYM_FUNC_END(clmul_polyval_mul) 283 + SYM_FUNC_END(polyval_mul_pclmul_avx) 281 284 282 285 /* 283 286 * Perform polynomial evaluation as specified by POLYVAL. This computes: 284 287 * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} 285 288 * where n=nblocks, h is the hash key, and m_i are the message blocks. 286 289 * 287 - * rdi - pointer to precomputed key powers h^8 ... h^1 288 - * rsi - pointer to message blocks 289 - * rdx - number of blocks to hash 290 - * rcx - pointer to the accumulator 290 + * rdi - pointer to the accumulator 291 + * rsi - pointer to precomputed key powers h^8 ... 
h^1 292 + * rdx - pointer to message blocks 293 + * rcx - number of blocks to hash 291 294 * 292 - * void clmul_polyval_update(const struct polyval_tfm_ctx *keys, 293 - * const u8 *in, size_t nblocks, u8 *accumulator); 295 + * void polyval_blocks_pclmul_avx(struct polyval_elem *acc, 296 + * const struct polyval_key *key, 297 + * const u8 *data, size_t nblocks); 294 298 */ 295 - SYM_FUNC_START(clmul_polyval_update) 299 + SYM_FUNC_START(polyval_blocks_pclmul_avx) 296 300 FRAME_BEGIN 297 301 vmovdqa .Lgstar(%rip), GSTAR 298 302 movups (ACCUMULATOR), SUM ··· 316 318 movups SUM, (ACCUMULATOR) 317 319 FRAME_END 318 320 RET 319 - SYM_FUNC_END(clmul_polyval_update) 321 + SYM_FUNC_END(polyval_blocks_pclmul_avx)
-180
arch/x86/crypto/polyval-clmulni_glue.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Glue code for POLYVAL using PCMULQDQ-NI 4 - * 5 - * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> 6 - * Copyright (c) 2009 Intel Corp. 7 - * Author: Huang Ying <ying.huang@intel.com> 8 - * Copyright 2021 Google LLC 9 - */ 10 - 11 - /* 12 - * Glue code based on ghash-clmulni-intel_glue.c. 13 - * 14 - * This implementation of POLYVAL uses montgomery multiplication 15 - * accelerated by PCLMULQDQ-NI to implement the finite field 16 - * operations. 17 - */ 18 - 19 - #include <asm/cpu_device_id.h> 20 - #include <asm/fpu/api.h> 21 - #include <crypto/internal/hash.h> 22 - #include <crypto/polyval.h> 23 - #include <crypto/utils.h> 24 - #include <linux/errno.h> 25 - #include <linux/kernel.h> 26 - #include <linux/module.h> 27 - #include <linux/string.h> 28 - 29 - #define POLYVAL_ALIGN 16 30 - #define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN) 31 - #define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1)) 32 - #define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA) 33 - #define NUM_KEY_POWERS 8 34 - 35 - struct polyval_tfm_ctx { 36 - /* 37 - * These powers must be in the order h^8, ..., h^1. 
38 - */ 39 - u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE] POLYVAL_ALIGN_ATTR; 40 - }; 41 - 42 - struct polyval_desc_ctx { 43 - u8 buffer[POLYVAL_BLOCK_SIZE]; 44 - }; 45 - 46 - asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys, 47 - const u8 *in, size_t nblocks, u8 *accumulator); 48 - asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2); 49 - 50 - static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm) 51 - { 52 - return PTR_ALIGN(crypto_shash_ctx(tfm), POLYVAL_ALIGN); 53 - } 54 - 55 - static void internal_polyval_update(const struct polyval_tfm_ctx *keys, 56 - const u8 *in, size_t nblocks, u8 *accumulator) 57 - { 58 - kernel_fpu_begin(); 59 - clmul_polyval_update(keys, in, nblocks, accumulator); 60 - kernel_fpu_end(); 61 - } 62 - 63 - static void internal_polyval_mul(u8 *op1, const u8 *op2) 64 - { 65 - kernel_fpu_begin(); 66 - clmul_polyval_mul(op1, op2); 67 - kernel_fpu_end(); 68 - } 69 - 70 - static int polyval_x86_setkey(struct crypto_shash *tfm, 71 - const u8 *key, unsigned int keylen) 72 - { 73 - struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(tfm); 74 - int i; 75 - 76 - if (keylen != POLYVAL_BLOCK_SIZE) 77 - return -EINVAL; 78 - 79 - memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE); 80 - 81 - for (i = NUM_KEY_POWERS-2; i >= 0; i--) { 82 - memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE); 83 - internal_polyval_mul(tctx->key_powers[i], 84 - tctx->key_powers[i+1]); 85 - } 86 - 87 - return 0; 88 - } 89 - 90 - static int polyval_x86_init(struct shash_desc *desc) 91 - { 92 - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); 93 - 94 - memset(dctx, 0, sizeof(*dctx)); 95 - 96 - return 0; 97 - } 98 - 99 - static int polyval_x86_update(struct shash_desc *desc, 100 - const u8 *src, unsigned int srclen) 101 - { 102 - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); 103 - const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm); 104 - unsigned int nblocks; 105 - 106 - do { 107 - 
/* Allow rescheduling every 4K bytes. */ 108 - nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; 109 - internal_polyval_update(tctx, src, nblocks, dctx->buffer); 110 - srclen -= nblocks * POLYVAL_BLOCK_SIZE; 111 - src += nblocks * POLYVAL_BLOCK_SIZE; 112 - } while (srclen >= POLYVAL_BLOCK_SIZE); 113 - 114 - return srclen; 115 - } 116 - 117 - static int polyval_x86_finup(struct shash_desc *desc, const u8 *src, 118 - unsigned int len, u8 *dst) 119 - { 120 - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); 121 - const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm); 122 - 123 - if (len) { 124 - crypto_xor(dctx->buffer, src, len); 125 - internal_polyval_mul(dctx->buffer, 126 - tctx->key_powers[NUM_KEY_POWERS-1]); 127 - } 128 - 129 - memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE); 130 - 131 - return 0; 132 - } 133 - 134 - static struct shash_alg polyval_alg = { 135 - .digestsize = POLYVAL_DIGEST_SIZE, 136 - .init = polyval_x86_init, 137 - .update = polyval_x86_update, 138 - .finup = polyval_x86_finup, 139 - .setkey = polyval_x86_setkey, 140 - .descsize = sizeof(struct polyval_desc_ctx), 141 - .base = { 142 - .cra_name = "polyval", 143 - .cra_driver_name = "polyval-clmulni", 144 - .cra_priority = 200, 145 - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, 146 - .cra_blocksize = POLYVAL_BLOCK_SIZE, 147 - .cra_ctxsize = POLYVAL_CTX_SIZE, 148 - .cra_module = THIS_MODULE, 149 - }, 150 - }; 151 - 152 - __maybe_unused static const struct x86_cpu_id pcmul_cpu_id[] = { 153 - X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), 154 - {} 155 - }; 156 - MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id); 157 - 158 - static int __init polyval_clmulni_mod_init(void) 159 - { 160 - if (!x86_match_cpu(pcmul_cpu_id)) 161 - return -ENODEV; 162 - 163 - if (!boot_cpu_has(X86_FEATURE_AVX)) 164 - return -ENODEV; 165 - 166 - return crypto_register_shash(&polyval_alg); 167 - } 168 - 169 - static void __exit polyval_clmulni_mod_exit(void) 170 - { 171 - crypto_unregister_shash(&polyval_alg); 172 - } 
173 - 174 - module_init(polyval_clmulni_mod_init); 175 - module_exit(polyval_clmulni_mod_exit); 176 - 177 - MODULE_LICENSE("GPL"); 178 - MODULE_DESCRIPTION("POLYVAL hash function accelerated by PCLMULQDQ-NI"); 179 - MODULE_ALIAS_CRYPTO("polyval"); 180 - MODULE_ALIAS_CRYPTO("polyval-clmulni");
+3
include/crypto/polyval.h
··· 48 48 #ifdef CONFIG_ARM64 49 49 /** @h_powers: Powers of the hash key H^8 through H^1 */ 50 50 struct polyval_elem h_powers[8]; 51 + #elif defined(CONFIG_X86) 52 + /** @h_powers: Powers of the hash key H^8 through H^1 */ 53 + struct polyval_elem h_powers[8]; 51 54 #else 52 55 #error "Unhandled arch" 53 56 #endif
+1
lib/crypto/Kconfig
··· 145 145 bool 146 146 depends on CRYPTO_LIB_POLYVAL && !UML 147 147 default y if ARM64 && KERNEL_MODE_NEON 148 + default y if X86_64 148 149 149 150 config CRYPTO_LIB_CHACHA20POLY1305 150 151 tristate
+1
lib/crypto/Makefile
··· 203 203 ifeq ($(CONFIG_CRYPTO_LIB_POLYVAL_ARCH),y) 204 204 CFLAGS_polyval.o += -I$(src)/$(SRCARCH) 205 205 libpolyval-$(CONFIG_ARM64) += arm64/polyval-ce-core.o 206 + libpolyval-$(CONFIG_X86) += x86/polyval-pclmul-avx.o 206 207 endif 207 208 208 209 ################################################################################
+83
lib/crypto/x86/polyval.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * POLYVAL library functions, x86_64 optimized 4 + * 5 + * Copyright 2025 Google LLC 6 + */ 7 + #include <asm/fpu/api.h> 8 + #include <linux/cpufeature.h> 9 + 10 + #define NUM_H_POWERS 8 11 + 12 + static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmul_avx); 13 + 14 + asmlinkage void polyval_mul_pclmul_avx(struct polyval_elem *a, 15 + const struct polyval_elem *b); 16 + asmlinkage void polyval_blocks_pclmul_avx(struct polyval_elem *acc, 17 + const struct polyval_key *key, 18 + const u8 *data, size_t nblocks); 19 + 20 + static void polyval_preparekey_arch(struct polyval_key *key, 21 + const u8 raw_key[POLYVAL_BLOCK_SIZE]) 22 + { 23 + static_assert(ARRAY_SIZE(key->h_powers) == NUM_H_POWERS); 24 + memcpy(&key->h_powers[NUM_H_POWERS - 1], raw_key, POLYVAL_BLOCK_SIZE); 25 + if (static_branch_likely(&have_pclmul_avx) && irq_fpu_usable()) { 26 + kernel_fpu_begin(); 27 + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { 28 + key->h_powers[i] = key->h_powers[i + 1]; 29 + polyval_mul_pclmul_avx( 30 + &key->h_powers[i], 31 + &key->h_powers[NUM_H_POWERS - 1]); 32 + } 33 + kernel_fpu_end(); 34 + } else { 35 + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { 36 + key->h_powers[i] = key->h_powers[i + 1]; 37 + polyval_mul_generic(&key->h_powers[i], 38 + &key->h_powers[NUM_H_POWERS - 1]); 39 + } 40 + } 41 + } 42 + 43 + static void polyval_mul_arch(struct polyval_elem *acc, 44 + const struct polyval_key *key) 45 + { 46 + if (static_branch_likely(&have_pclmul_avx) && irq_fpu_usable()) { 47 + kernel_fpu_begin(); 48 + polyval_mul_pclmul_avx(acc, &key->h_powers[NUM_H_POWERS - 1]); 49 + kernel_fpu_end(); 50 + } else { 51 + polyval_mul_generic(acc, &key->h_powers[NUM_H_POWERS - 1]); 52 + } 53 + } 54 + 55 + static void polyval_blocks_arch(struct polyval_elem *acc, 56 + const struct polyval_key *key, 57 + const u8 *data, size_t nblocks) 58 + { 59 + if (static_branch_likely(&have_pclmul_avx) && irq_fpu_usable()) { 60 + do { 61 + 
/* Allow rescheduling every 4 KiB. */ 62 + size_t n = min_t(size_t, nblocks, 63 + 4096 / POLYVAL_BLOCK_SIZE); 64 + 65 + kernel_fpu_begin(); 66 + polyval_blocks_pclmul_avx(acc, key, data, n); 67 + kernel_fpu_end(); 68 + data += n * POLYVAL_BLOCK_SIZE; 69 + nblocks -= n; 70 + } while (nblocks); 71 + } else { 72 + polyval_blocks_generic(acc, &key->h_powers[NUM_H_POWERS - 1], 73 + data, nblocks); 74 + } 75 + } 76 + 77 + #define polyval_mod_init_arch polyval_mod_init_arch 78 + static void polyval_mod_init_arch(void) 79 + { 80 + if (boot_cpu_has(X86_FEATURE_PCLMULQDQ) && 81 + boot_cpu_has(X86_FEATURE_AVX)) 82 + static_branch_enable(&have_pclmul_avx); 83 + }