Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

lib/crypto: riscv/aes: Migrate optimized code into library

Move the aes_encrypt_zvkned() and aes_decrypt_zvkned() assembly
functions into lib/crypto/, wire them up to the AES library API, and
remove the "aes-riscv64-zvkned" crypto_cipher algorithm.

To make this possible, change the prototypes of these functions to
take (rndkeys, key_len) instead of a pointer to crypto_aes_ctx, and
change the RISC-V AES-XTS code to implement tweak encryption using the
AES library instead of directly calling aes_encrypt_zvkned().
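
For clarity, both prototypes appear in the diff below. The old declaration
(removed from aes-riscv64-glue.c) took the whole crypto_aes_ctx and let the
assembly load the key length itself; the new one (declared in
lib/crypto/riscv/aes.h) takes the round keys and key length separately, and
also swaps the parameter order to the library's (out, in) convention:

    /* Old: the assembly dereferenced crypto_aes_ctx itself. */
    asmlinkage void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
                                       const u8 in[AES_BLOCK_SIZE],
                                       u8 out[AES_BLOCK_SIZE]);

    /* New: no dependency on the crypto_aes_ctx layout. */
    void aes_encrypt_zvkned(const u32 rndkeys[], int key_len,
                            u8 out[AES_BLOCK_SIZE],
                            const u8 in[AES_BLOCK_SIZE]);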

The result is that both the AES library and crypto_cipher APIs use
RISC-V's AES instructions, whereas previously only crypto_cipher did
(and it wasn't enabled by default, which this commit fixes as well).

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20260112192035.10427-15-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>

8 files changed, 166 insertions(+), 106 deletions(-)

arch/riscv/crypto/Kconfig (-2)

···
 	tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS"
 	depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
 		   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
-	select CRYPTO_ALGAPI
 	select CRYPTO_LIB_AES
 	select CRYPTO_SKCIPHER
 	help
-	  Block cipher: AES cipher algorithms
 	  Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTS

 	  Architecture: riscv64 using:

arch/riscv/crypto/aes-macros.S (+11 -1)

···
 // - If AES-256, loads round keys into v1-v15 and continues onwards.
 //
 // Also sets vl=4 and vtype=e32,m1,ta,ma.  Clobbers t0 and t1.
-.macro	aes_begin	keyp, label128, label192
+.macro	aes_begin	keyp, label128, label192, key_len
+.ifb	\key_len
 	lwu	t0, 480(\keyp)	// t0 = key length in bytes
+.endif
 	li	t1, 24		// t1 = key length for AES-192
 	vsetivli	zero, 4, e32, m1, ta, ma
 	vle32.v	v1, (\keyp)
···
 	vle32.v	v10, (\keyp)
 	addi	\keyp, \keyp, 16
 	vle32.v	v11, (\keyp)
+.ifb	\key_len
 	blt	t0, t1, \label128	// If AES-128, goto label128.
+.else
+	blt	\key_len, t1, \label128	// If AES-128, goto label128.
+.endif
 	addi	\keyp, \keyp, 16
 	vle32.v	v12, (\keyp)
 	addi	\keyp, \keyp, 16
 	vle32.v	v13, (\keyp)
+.ifb	\key_len
 	beq	t0, t1, \label192	// If AES-192, goto label192.
+.else
+	beq	\key_len, t1, \label192	// If AES-192, goto label192.
+.endif
 	// Else, it's AES-256.
 	addi	\keyp, \keyp, 16
 	vle32.v	v14, (\keyp)
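
The .ifb ("if blank") directives above are evaluated at assembly time, when
the macro is expanded: callers that omit the new key_len argument keep the
old code that loads the length from memory (offset 480 into crypto_aes_ctx
is its key_length field, after the 240-byte key_enc and key_dec arrays),
while the new library entry points pass the length in a register. A rough C
analogy of the resulting dispatch; the struct and function here are
illustrative sketches, not kernel code:

    #include <stdint.h>

    struct aes_ctx_layout {            /* sketch of crypto_aes_ctx */
            uint32_t key_enc[60];      /* 240 bytes: encryption round keys */
            uint32_t key_dec[60];      /* 240 bytes: decryption round keys */
            uint32_t key_length;       /* hence "lwu t0, 480(\keyp)" */
    };

    /* key_len == 0 models the blank macro argument (the .ifb path);
     * in the real macro the choice is made when the code is assembled,
     * so each expansion contains only one of the two comparisons. */
    static int aes_variant(const struct aes_ctx_layout *ctx, int key_len)
    {
            int len = key_len ? key_len : (int)ctx->key_length;

            if (len < 24)              /* blt ..., \label128 */
                    return 128;
            if (len == 24)             /* beq ..., \label192 */
                    return 192;
            return 256;                /* else: AES-256 */
    }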

arch/riscv/crypto/aes-riscv64-glue.c (+5 -76)

···
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * AES using the RISC-V vector crypto extensions.  Includes the bare block
- * cipher and the ECB, CBC, CBC-CTS, CTR, and XTS modes.
+ * AES modes using the RISC-V vector crypto extensions
  *
  * Copyright (C) 2023 VRULL GmbH
  * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
···
 #include <asm/simd.h>
 #include <asm/vector.h>
 #include <crypto/aes.h>
-#include <crypto/internal/cipher.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <linux/linkage.h>
 #include <linux/module.h>
-
-asmlinkage void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
-				   const u8 in[AES_BLOCK_SIZE],
-				   u8 out[AES_BLOCK_SIZE]);
-asmlinkage void aes_decrypt_zvkned(const struct crypto_aes_ctx *key,
-				   const u8 in[AES_BLOCK_SIZE],
-				   u8 out[AES_BLOCK_SIZE]);

 asmlinkage void aes_ecb_encrypt_zvkned(const struct crypto_aes_ctx *key,
 				       const u8 *in, u8 *out, size_t len);
···
 	return aes_expandkey(ctx, key, keylen);
 }

-static int riscv64_aes_setkey_cipher(struct crypto_tfm *tfm,
-				     const u8 *key, unsigned int keylen)
-{
-	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	return riscv64_aes_setkey(ctx, key, keylen);
-}
-
 static int riscv64_aes_setkey_skcipher(struct crypto_skcipher *tfm,
 				       const u8 *key, unsigned int keylen)
 {
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

 	return riscv64_aes_setkey(ctx, key, keylen);
-}
-
-/* Bare AES, without a mode of operation */
-
-static void riscv64_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	if (crypto_simd_usable()) {
-		kernel_vector_begin();
-		aes_encrypt_zvkned(ctx, src, dst);
-		kernel_vector_end();
-	} else {
-		aes_encrypt(ctx, dst, src);
-	}
-}
-
-static void riscv64_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	if (crypto_simd_usable()) {
-		kernel_vector_begin();
-		aes_decrypt_zvkned(ctx, src, dst);
-		kernel_vector_end();
-	} else {
-		aes_decrypt(ctx, dst, src);
-	}
 }

 /* AES-ECB */
···
 struct riscv64_aes_xts_ctx {
 	struct crypto_aes_ctx ctx1;
-	struct crypto_aes_ctx ctx2;
+	struct aes_enckey tweak_key;
 };

 static int riscv64_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
···
 	return xts_verify_key(tfm, key, keylen) ?:
 	       riscv64_aes_setkey(&ctx->ctx1, key, keylen / 2) ?:
-	       riscv64_aes_setkey(&ctx->ctx2, key + keylen / 2, keylen / 2);
+	       aes_prepareenckey(&ctx->tweak_key, key + keylen / 2, keylen / 2);
 }

 static int riscv64_aes_xts_crypt(struct skcipher_request *req, bool enc)
···
 		return -EINVAL;

 	/* Encrypt the IV with the tweak key to get the first tweak. */
-	kernel_vector_begin();
-	aes_encrypt_zvkned(&ctx->ctx2, req->iv, req->iv);
-	kernel_vector_end();
+	aes_encrypt(&ctx->tweak_key, req->iv, req->iv);

 	err = skcipher_walk_virt(&walk, req, false);
···
 }

 /* Algorithm definitions */
-
-static struct crypto_alg riscv64_zvkned_aes_cipher_alg = {
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypto_aes_ctx),
-	.cra_priority = 300,
-	.cra_name = "aes",
-	.cra_driver_name = "aes-riscv64-zvkned",
-	.cra_cipher = {
-		.cia_min_keysize = AES_MIN_KEY_SIZE,
-		.cia_max_keysize = AES_MAX_KEY_SIZE,
-		.cia_setkey = riscv64_aes_setkey_cipher,
-		.cia_encrypt = riscv64_aes_encrypt,
-		.cia_decrypt = riscv64_aes_decrypt,
-	},
-	.cra_module = THIS_MODULE,
-};

 static struct skcipher_alg riscv64_zvkned_aes_skcipher_algs[] = {
 	{
···
 	if (riscv_isa_extension_available(NULL, ZVKNED) &&
 	    riscv_vector_vlen() >= 128) {
-		err = crypto_register_alg(&riscv64_zvkned_aes_cipher_alg);
-		if (err)
-			return err;
-
 		err = crypto_register_skciphers(
 			riscv64_zvkned_aes_skcipher_algs,
 			ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
 		if (err)
-			goto unregister_zvkned_cipher_alg;
+			return err;

 		if (riscv_isa_extension_available(NULL, ZVKB)) {
 			err = crypto_register_skcipher(
···
 unregister_zvkned_skcipher_algs:
 	crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
 				    ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
-unregister_zvkned_cipher_alg:
-	crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
 	return err;

···
 	crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
 	crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
 				    ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
-	crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
 }

 module_init(riscv64_aes_mod_init);
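
One effect worth noting in the XTS hunk above: the tweak computation
collapses from an explicit kernel_vector_begin()/aes_encrypt_zvkned()/
kernel_vector_end() sequence to a single aes_encrypt() library call, because
the library (see lib/crypto/riscv/aes.h below) makes the SIMD-usable check
and the generic fallback internally. A minimal sketch of the resulting call
site; the wrapper name is hypothetical, purely for illustration (in the
patch the call sits inline in riscv64_aes_xts_crypt()):

    /* Hypothetical wrapper around the call shown in the diff above. */
    static void xts_first_tweak(const struct riscv64_aes_xts_ctx *ctx,
                                u8 iv[AES_BLOCK_SIZE])
    {
            /* Encrypts the IV in place with the tweak key; the library
             * dispatches to Zvkned or the generic fallback internally. */
            aes_encrypt(&ctx->tweak_key, iv, iv);
    }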

arch/riscv/crypto/aes-riscv64-zvkned.S (-27)

···
 #define LEN	a3
 #define IVP	a4

-.macro	__aes_crypt_zvkned	enc, keylen
-	vle32.v	v16, (INP)
-	aes_crypt	v16, \enc, \keylen
-	vse32.v	v16, (OUTP)
-	ret
-.endm
-
-.macro	aes_crypt_zvkned	enc
-	aes_begin	KEYP, 128f, 192f
-	__aes_crypt_zvkned	\enc, 256
-128:
-	__aes_crypt_zvkned	\enc, 128
-192:
-	__aes_crypt_zvkned	\enc, 192
-.endm
-
-// void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
-//			   const u8 in[16], u8 out[16]);
-SYM_FUNC_START(aes_encrypt_zvkned)
-	aes_crypt_zvkned	1
-SYM_FUNC_END(aes_encrypt_zvkned)
-
-// Same prototype and calling convention as the encryption function
-SYM_FUNC_START(aes_decrypt_zvkned)
-	aes_crypt_zvkned	0
-SYM_FUNC_END(aes_decrypt_zvkned)
-
 .macro	__aes_ecb_crypt	enc, keylen
 	srli	t0, LEN, 2
 	// t0 is the remaining length in 32-bit words.  It's a multiple of 4.

lib/crypto/Kconfig (+2)

···
 	default y if ARM
 	default y if ARM64
 	default y if PPC && (SPE || (PPC64 && VSX))
+	default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS

 config CRYPTO_LIB_AESCFB
 	tristate

lib/crypto/Makefile (+1)

···
 endif # !CONFIG_SPE
 endif # CONFIG_PPC

+libaes-$(CONFIG_RISCV) += riscv/aes-riscv64-zvkned.o
 endif # CONFIG_CRYPTO_LIB_AES_ARCH

 ################################################################################

lib/crypto/riscv/aes-riscv64-zvkned.S (new file, +84)

···
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector AES block cipher extension ('Zvkned')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvkned
+
+#include "../../arch/riscv/crypto/aes-macros.S"
+
+#define RNDKEYS	a0
+#define KEY_LEN	a1
+#define OUTP	a2
+#define INP	a3
+
+.macro	__aes_crypt_zvkned	enc, keybits
+	vle32.v	v16, (INP)
+	aes_crypt	v16, \enc, \keybits
+	vse32.v	v16, (OUTP)
+	ret
+.endm
+
+.macro	aes_crypt_zvkned	enc
+	aes_begin	RNDKEYS, 128f, 192f, KEY_LEN
+	__aes_crypt_zvkned	\enc, 256
+128:
+	__aes_crypt_zvkned	\enc, 128
+192:
+	__aes_crypt_zvkned	\enc, 192
+.endm
+
+// void aes_encrypt_zvkned(const u32 rndkeys[], int key_len,
+//			   u8 out[AES_BLOCK_SIZE], const u8 in[AES_BLOCK_SIZE]);
+SYM_FUNC_START(aes_encrypt_zvkned)
+	aes_crypt_zvkned	1
+SYM_FUNC_END(aes_encrypt_zvkned)
+
+// void aes_decrypt_zvkned(const u32 rndkeys[], int key_len,
+//			   u8 out[AES_BLOCK_SIZE], const u8 in[AES_BLOCK_SIZE]);
+SYM_FUNC_START(aes_decrypt_zvkned)
+	aes_crypt_zvkned	0
+SYM_FUNC_END(aes_decrypt_zvkned)

lib/crypto/riscv/aes.h (new file, +63)

···
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 VRULL GmbH
+ * Copyright (C) 2023 SiFive, Inc.
+ * Copyright 2024 Google LLC
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_zvkned);
+
+void aes_encrypt_zvkned(const u32 rndkeys[], int key_len,
+			u8 out[AES_BLOCK_SIZE], const u8 in[AES_BLOCK_SIZE]);
+void aes_decrypt_zvkned(const u32 rndkeys[], int key_len,
+			u8 out[AES_BLOCK_SIZE], const u8 in[AES_BLOCK_SIZE]);
+
+static void aes_preparekey_arch(union aes_enckey_arch *k,
+				union aes_invkey_arch *inv_k,
+				const u8 *in_key, int key_len, int nrounds)
+{
+	aes_expandkey_generic(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL,
+			      in_key, key_len);
+}
+
+static void aes_encrypt_arch(const struct aes_enckey *key,
+			     u8 out[AES_BLOCK_SIZE],
+			     const u8 in[AES_BLOCK_SIZE])
+{
+	if (static_branch_likely(&have_zvkned) && likely(may_use_simd())) {
+		kernel_vector_begin();
+		aes_encrypt_zvkned(key->k.rndkeys, key->len, out, in);
+		kernel_vector_end();
+	} else {
+		aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
+	}
+}
+
+static void aes_decrypt_arch(const struct aes_key *key,
+			     u8 out[AES_BLOCK_SIZE],
+			     const u8 in[AES_BLOCK_SIZE])
+{
+	/*
+	 * Note that the Zvkned code uses the standard round keys, while the
+	 * fallback uses the inverse round keys.  Thus both must be present.
+	 */
+	if (static_branch_likely(&have_zvkned) && likely(may_use_simd())) {
+		kernel_vector_begin();
+		aes_decrypt_zvkned(key->k.rndkeys, key->len, out, in);
+		kernel_vector_end();
+	} else {
+		aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds,
+				    out, in);
+	}
+}
+
+#define aes_mod_init_arch aes_mod_init_arch
+static void aes_mod_init_arch(void)
+{
+	if (riscv_isa_extension_available(NULL, ZVKNED) &&
+	    riscv_vector_vlen() >= 128)
+		static_branch_enable(&have_zvkned);
+}
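
For reference, a minimal sketch of how a kernel caller now reaches this code
through the AES library API, assuming the aes_prepareenckey()/aes_encrypt()
interface and struct aes_enckey used elsewhere in this patch; the function
name and error handling here are illustrative, not part of the patch:

    #include <crypto/aes.h>

    static int encrypt_one_block(const u8 *raw_key, unsigned int key_len,
                                 u8 out[AES_BLOCK_SIZE],
                                 const u8 in[AES_BLOCK_SIZE])
    {
            struct aes_enckey enc_key;  /* encrypt-only key, no inverse keys */
            int err;

            err = aes_prepareenckey(&enc_key, raw_key, key_len);
            if (err)
                    return err;         /* e.g. unsupported key length */

            /* Uses aes_encrypt_zvkned() when Zvkned is present and the
             * vector unit is usable; generic fallback otherwise. */
            aes_encrypt(&enc_key, out, in);
            return 0;
    }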