Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto: adiantum - Drop support for asynchronous xchacha ciphers

This feature isn't useful in practice. Simplify and streamline the code
in the synchronous case, i.e. the case that actually matters, instead.

For example, by no longer having to support resuming the calculation
after an asynchronous return of the xchacha cipher, we can just keep
more of the state on the stack instead of in the request context.

Link: https://lore.kernel.org/r/20251211011846.8179-10-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
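
The commit message's point about keeping state on the stack follows from the kernel crypto API's completion model: an asynchronous skcipher may return -EINPROGRESS (or -EBUSY) and finish later through a completion callback, so any intermediate state must live in the request context where the callback can find it; a synchronous skcipher completes before crypto_skcipher_encrypt() returns, so plain locals suffice. The sketch below is illustrative only and is not code from this patch; struct demo_reqctx, demo_second_hash_step(), and the other demo_* names are hypothetical stand-ins for Adiantum's request context and its post-stream-cipher work, and the sub-request is assumed to have been set up (tfm, src/dst, length) elsewhere.

    /* Illustrative sketch only; all demo_* names are hypothetical. */
    #include <crypto/skcipher.h>
    #include <crypto/internal/skcipher.h>
    #include <linux/errno.h>

    struct demo_reqctx {
    	u8 saved_state[16];		/* needed only in the async case */
    	struct skcipher_request subreq;	/* variable-size ctx, must be last */
    };

    static void demo_second_hash_step(struct skcipher_request *req, u8 *state)
    {
    	/* hypothetical placeholder for the work done after the stream cipher */
    }

    /*
     * Async-capable sub-cipher: the encrypt call may return -EINPROGRESS, so the
     * state must survive in the request context and the remaining work has to be
     * finished from a completion callback.
     */
    static void demo_subreq_done(void *data, int err)
    {
    	struct skcipher_request *req = data;
    	struct demo_reqctx *rctx = skcipher_request_ctx(req);

    	if (!err)
    		demo_second_hash_step(req, rctx->saved_state);
    	skcipher_request_complete(req, err);
    }

    static int demo_crypt_async(struct skcipher_request *req)
    {
    	struct demo_reqctx *rctx = skcipher_request_ctx(req);
    	int err;

    	skcipher_request_set_callback(&rctx->subreq, req->base.flags,
    				      demo_subreq_done, req);
    	err = crypto_skcipher_encrypt(&rctx->subreq);
    	if (err)
    		return err;	/* -EINPROGRESS/-EBUSY: finished in the callback */
    	demo_second_hash_step(req, rctx->saved_state);
    	return 0;
    }

    /*
     * Synchronous-only sub-cipher: the call completes (or fails) before it
     * returns, so no callback is needed and the state can be a stack local.
     */
    static int demo_crypt_sync(struct skcipher_request *req)
    {
    	struct demo_reqctx *rctx = skcipher_request_ctx(req);
    	u8 state_on_stack[16];
    	int err;

    	skcipher_request_set_callback(&rctx->subreq, req->base.flags, NULL, NULL);
    	err = crypto_skcipher_encrypt(&rctx->subreq);
    	if (err)
    		return err;
    	demo_second_hash_step(req, state_on_stack);
    	return 0;
    }

This is the shape of the change in the diff below: adiantum_finish() and adiantum_streamcipher_done() are folded into adiantum_crypt(), and rbuf, header_hash, and the enc flag no longer need to be stored in the request context.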

+71 -105
crypto/adiantum.c
···
 };
 
 struct adiantum_request_ctx {
-
-	/*
-	 * Buffer for right-hand part of data, i.e.
-	 *
-	 * P_L => P_M => C_M => C_R when encrypting, or
-	 * C_R => C_M => P_M => P_L when decrypting.
-	 *
-	 * Also used to build the IV for the stream cipher.
-	 */
-	union {
-		u8 bytes[XCHACHA_IV_SIZE];
-		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
-		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
-	} rbuf;
-
-	bool enc; /* true if encrypting, false if decrypting */
-
-	/*
-	 * The result of the Poly1305 ε-∆U hash function applied to
-	 * (bulk length, tweak)
-	 */
-	le128 header_hash;
-
 	/*
 	 * skcipher sub-request size is unknown at compile-time, so it needs to
 	 * go after the members with known sizes.
···
 
 /*
  * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
- * result to rctx->header_hash. This is the calculation
+ * result to @out. This is the calculation
  *
  *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
  *
···
  * inputs only) taken over the left-hand part (the "bulk") of the message, to
  * give the overall Adiantum hash of the (tweak, left-hand part) pair.
  */
-static void adiantum_hash_header(struct skcipher_request *req)
+static void adiantum_hash_header(struct skcipher_request *req, le128 *out)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
-	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
 	struct {
 		__le64 message_bits;
···
 	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
 			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
 
-	poly1305_core_emit(&state, NULL, &rctx->header_hash);
+	poly1305_core_emit(&state, NULL, out);
 }
 
 /* Pass the next NH hash value through Poly1305 */
···
 	nhpoly1305_final(&rctx->u.hash_ctx, tctx, out);
 }
 
-/* Continue Adiantum encryption/decryption after the stream cipher step */
-static int adiantum_finish(struct skcipher_request *req)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
-	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
-	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
-	struct scatterlist *dst = req->dst;
-	le128 digest;
-
-	/* If decrypting, decrypt C_M with the block cipher to get P_M */
-	if (!rctx->enc)
-		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
-					  rctx->rbuf.bytes);
-
-	/*
-	 * Second hash step
-	 * enc: C_R = C_M - H_{K_H}(T, C_L)
-	 * dec: P_R = P_M - H_{K_H}(T, P_L)
-	 */
-	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
-	if (dst->length >= req->cryptlen &&
-	    dst->offset + req->cryptlen <= PAGE_SIZE) {
-		/* Fast path for single-page destination */
-		struct page *page = sg_page(dst);
-		void *virt = kmap_local_page(page) + dst->offset;
-
-		nhpoly1305_init(&rctx->u.hash_ctx);
-		nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
-		nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
-		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
-		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
-		flush_dcache_page(page);
-		kunmap_local(virt);
-	} else {
-		/* Slow path that works for any destination scatterlist */
-		adiantum_hash_message(req, dst, &digest);
-		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
-		memcpy_to_sglist(dst, bulk_len, &rctx->rbuf.bignum,
-				 sizeof(le128));
-	}
-	return 0;
-}
-
-static void adiantum_streamcipher_done(void *data, int err)
-{
-	struct skcipher_request *req = data;
-
-	if (!err)
-		err = adiantum_finish(req);
-
-	skcipher_request_complete(req, err);
-}
-
 static int adiantum_crypt(struct skcipher_request *req, bool enc)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
-	struct scatterlist *src = req->src;
+	struct scatterlist *src = req->src, *dst = req->dst;
+	/*
+	 * Buffer for right-hand part of data, i.e.
+	 *
+	 * P_L => P_M => C_M => C_R when encrypting, or
+	 * C_R => C_M => P_M => P_L when decrypting.
+	 *
+	 * Also used to build the IV for the stream cipher.
+	 */
+	union {
+		u8 bytes[XCHACHA_IV_SIZE];
+		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
+		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
+	} rbuf;
+	le128 header_hash, msg_hash;
 	unsigned int stream_len;
-	le128 digest;
+	int err;
 
 	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
 		return -EINVAL;
-
-	rctx->enc = enc;
 
 	/*
 	 * First hash step
 	 * enc: P_M = P_R + H_{K_H}(T, P_L)
 	 * dec: C_M = C_R + H_{K_H}(T, C_L)
 	 */
-	adiantum_hash_header(req);
+	adiantum_hash_header(req, &header_hash);
 	if (src->length >= req->cryptlen &&
 	    src->offset + req->cryptlen <= PAGE_SIZE) {
 		/* Fast path for single-page source */
···
 
 		nhpoly1305_init(&rctx->u.hash_ctx);
 		nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
-		nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
-		memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
+		nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
+		memcpy(&rbuf.bignum, virt + bulk_len, sizeof(le128));
 		kunmap_local(virt);
 	} else {
 		/* Slow path that works for any source scatterlist */
-		adiantum_hash_message(req, src, &digest);
-		memcpy_from_sglist(&rctx->rbuf.bignum, src, bulk_len,
-				   sizeof(le128));
+		adiantum_hash_message(req, src, &msg_hash);
+		memcpy_from_sglist(&rbuf.bignum, src, bulk_len, sizeof(le128));
 	}
-	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
-	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+	le128_add(&rbuf.bignum, &rbuf.bignum, &header_hash);
+	le128_add(&rbuf.bignum, &rbuf.bignum, &msg_hash);
 
 	/* If encrypting, encrypt P_M with the block cipher to get C_M */
 	if (enc)
-		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
-					  rctx->rbuf.bytes);
+		crypto_cipher_encrypt_one(tctx->blockcipher, rbuf.bytes,
+					  rbuf.bytes);
 
 	/* Initialize the rest of the XChaCha IV (first part is C_M) */
 	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
 	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
-	rctx->rbuf.words[4] = cpu_to_le32(1);
-	rctx->rbuf.words[5] = 0;
-	rctx->rbuf.words[6] = 0;
-	rctx->rbuf.words[7] = 0;
+	rbuf.words[4] = cpu_to_le32(1);
+	rbuf.words[5] = 0;
+	rbuf.words[6] = 0;
+	rbuf.words[7] = 0;
 
 	/*
 	 * XChaCha needs to be done on all the data except the last 16 bytes;
···
 
 	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
 	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
-				   req->dst, stream_len, &rctx->rbuf);
+				   req->dst, stream_len, &rbuf);
 	skcipher_request_set_callback(&rctx->u.streamcipher_req,
-				      req->base.flags,
-				      adiantum_streamcipher_done, req);
-	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
-	       adiantum_finish(req);
+				      req->base.flags, NULL, NULL);
+	err = crypto_skcipher_encrypt(&rctx->u.streamcipher_req);
+	if (err)
+		return err;
+
+	/* If decrypting, decrypt C_M with the block cipher to get P_M */
+	if (!enc)
+		crypto_cipher_decrypt_one(tctx->blockcipher, rbuf.bytes,
+					  rbuf.bytes);
+
+	/*
+	 * Second hash step
+	 * enc: C_R = C_M - H_{K_H}(T, C_L)
+	 * dec: P_R = P_M - H_{K_H}(T, P_L)
+	 */
+	le128_sub(&rbuf.bignum, &rbuf.bignum, &header_hash);
+	if (dst->length >= req->cryptlen &&
+	    dst->offset + req->cryptlen <= PAGE_SIZE) {
+		/* Fast path for single-page destination */
+		struct page *page = sg_page(dst);
+		void *virt = kmap_local_page(page) + dst->offset;
+
+		nhpoly1305_init(&rctx->u.hash_ctx);
+		nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
+		nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
+		le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
+		memcpy(virt + bulk_len, &rbuf.bignum, sizeof(le128));
+		flush_dcache_page(page);
+		kunmap_local(virt);
+	} else {
+		/* Slow path that works for any destination scatterlist */
+		adiantum_hash_message(req, dst, &msg_hash);
+		le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
+		memcpy_to_sglist(dst, bulk_len, &rbuf.bignum, sizeof(le128));
+	}
+	return 0;
 }
 
 static int adiantum_encrypt(struct skcipher_request *req)
···
 	/* Stream cipher, e.g. "xchacha12" */
 	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
 				   skcipher_crypto_instance(inst),
-				   crypto_attr_alg_name(tb[1]), 0, mask);
+				   crypto_attr_alg_name(tb[1]), 0,
+				   mask | CRYPTO_ALG_ASYNC /* sync only */);
 	if (err)
 		goto err_free_inst;
 	streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);
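
The last hunk is what enforces the new requirement: adding CRYPTO_ALG_ASYNC to the lookup mask while leaving it clear in the type means the ASYNC flag must be zero, so only synchronous xchacha implementations can be bound to the spawn. The same type/mask convention applies to ordinary transform allocation. The fragment below is an illustrative sketch, not part of the patch, showing how a hypothetical caller would request a synchronous-only xchacha12 transform directly.

    #include <crypto/skcipher.h>
    #include <linux/err.h>

    /*
     * Illustrative only: type = 0 and mask = CRYPTO_ALG_ASYNC requires the
     * ASYNC flag to be clear, so only synchronous xchacha12 implementations
     * will be selected by the lookup.
     */
    static int demo_use_sync_xchacha12(void)
    {
    	struct crypto_skcipher *tfm;

    	tfm = crypto_alloc_skcipher("xchacha12", 0, CRYPTO_ALG_ASYNC);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	/* ... set a key, build and submit requests, etc. ... */

    	crypto_free_skcipher(tfm);
    	return 0;
    }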