Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

blk-crypto: handle the fallback above the block layer

Add a blk_crypto_submit_bio helper that either submits the bio when
it is not encrypted or inline encryption is provided, but otherwise
handles the encryption before going down into the low-level driver.
This reduces the risk from bio reordering and keeps memory allocation
as high up in the stack as possible.

Note that if the submitter knows that inline encryption is supported
by the underlying driver, it can still use plain
submit_bio.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
bb8e2019 66e5a11d

+68 -39
+6
Documentation/block/inline-encryption.rst
··· 206 206 for en/decryption. Users don't need to worry about freeing the bio_crypt_ctx 207 207 later, as that happens automatically when the bio is freed or reset. 208 208 209 + To submit a bio that uses inline encryption, users must call 210 + ``blk_crypto_submit_bio()`` instead of the usual ``submit_bio()``. This will 211 + submit the bio to the underlying driver if it supports inline crypto, or else 212 + call the blk-crypto fallback routines before submitting normal bios to the 213 + underlying drivers. 214 + 209 215 Finally, when done using inline encryption with a blk_crypto_key on a 210 216 block_device, users must call ``blk_crypto_evict_key()``. This ensures that 211 217 the key is evicted from all keyslots it may be programmed into and unlinked from
+7 -3
block/blk-core.c
··· 628 628 /* If plug is not used, add new plug here to cache nsecs time. */ 629 629 struct blk_plug plug; 630 630 631 - if (unlikely(!blk_crypto_bio_prep(bio))) 632 - return; 633 - 634 631 blk_start_plug(&plug); 635 632 636 633 if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) { ··· 790 793 */ 791 794 if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev)) 792 795 goto not_supported; 796 + 797 + if (bio_has_crypt_ctx(bio)) { 798 + if (WARN_ON_ONCE(!bio_has_data(bio))) 799 + goto end_io; 800 + if (!blk_crypto_supported(bio)) 801 + goto not_supported; 802 + } 793 803 794 804 if (should_fail_bio(bio)) 795 805 goto end_io;
+11 -8
block/blk-crypto-internal.h
··· 86 86 int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd, 87 87 void __user *argp); 88 88 89 + static inline bool blk_crypto_supported(struct bio *bio) 90 + { 91 + return blk_crypto_config_supported_natively(bio->bi_bdev, 92 + &bio->bi_crypt_context->bc_key->crypto_cfg); 93 + } 94 + 89 95 #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 90 96 91 97 static inline int blk_crypto_sysfs_register(struct gendisk *disk) ··· 145 139 return -ENOTTY; 146 140 } 147 141 142 + static inline bool blk_crypto_supported(struct bio *bio) 143 + { 144 + return false; 145 + } 146 + 148 147 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */ 149 148 150 149 void __bio_crypt_advance(struct bio *bio, unsigned int bytes); ··· 174 163 memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun, 175 164 sizeof(rq->crypt_ctx->bc_dun)); 176 165 #endif 177 - } 178 - 179 - bool __blk_crypto_bio_prep(struct bio *bio); 180 - static inline bool blk_crypto_bio_prep(struct bio *bio) 181 - { 182 - if (bio_has_crypt_ctx(bio)) 183 - return __blk_crypto_bio_prep(bio); 184 - return true; 185 166 } 186 167 187 168 blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
+6 -17
block/blk-crypto.c
··· 242 242 rq->crypt_ctx = NULL; 243 243 } 244 244 245 - /** 246 - * __blk_crypto_bio_prep - Prepare bio for inline encryption 247 - * @bio: bio to prepare 245 + /* 246 + * Process a bio with a crypto context. Returns true if the caller should 247 + * submit the passed in bio, false if the bio is consumed. 248 248 * 249 - * If the bio crypt context provided for the bio is supported by the underlying 250 - * device's inline encryption hardware, do nothing. 251 - * 252 - * Otherwise, try to perform en/decryption for this bio by falling back to the 253 - * kernel crypto API. For encryption this means submitting newly allocated 254 - * bios for the encrypted payload while keeping back the source bio until they 255 - * complete, while for reads the decryption happens in-place by a hooked in 256 - * completion handler. 257 - * 258 - * Caller must ensure bio has bio_crypt_ctx. 259 - * 260 - * Return: true if @bio should be submitted to the driver by the caller, else 261 - * false. Sets bio->bi_status, calls bio_endio and returns false on error. 249 + * See the kerneldoc comment for blk_crypto_submit_bio for further details. 262 250 */ 263 - bool __blk_crypto_bio_prep(struct bio *bio) 251 + bool __blk_crypto_submit_bio(struct bio *bio) 264 252 { 265 253 const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key; 266 254 struct block_device *bdev = bio->bi_bdev; ··· 276 288 277 289 return true; 278 290 } 291 + EXPORT_SYMBOL_GPL(__blk_crypto_submit_bio); 279 292 280 293 int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, 281 294 gfp_t gfp_mask)
+2 -1
fs/buffer.c
··· 29 29 #include <linux/slab.h> 30 30 #include <linux/capability.h> 31 31 #include <linux/blkdev.h> 32 + #include <linux/blk-crypto.h> 32 33 #include <linux/file.h> 33 34 #include <linux/quotaops.h> 34 35 #include <linux/highmem.h> ··· 2822 2821 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size); 2823 2822 } 2824 2823 2825 - submit_bio(bio); 2824 + blk_crypto_submit_bio(bio); 2826 2825 } 2827 2826 2828 2827 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
+1 -1
fs/crypto/bio.c
··· 105 105 } 106 106 107 107 atomic_inc(&done.pending); 108 - submit_bio(bio); 108 + blk_crypto_submit_bio(bio); 109 109 } 110 110 111 111 fscrypt_zeroout_range_done(&done);
+2 -1
fs/ext4/page-io.c
··· 7 7 * Written by Theodore Ts'o, 2010. 8 8 */ 9 9 10 + #include <linux/blk-crypto.h> 10 11 #include <linux/fs.h> 11 12 #include <linux/time.h> 12 13 #include <linux/highuid.h> ··· 402 401 if (bio) { 403 402 if (io->io_wbc->sync_mode == WB_SYNC_ALL) 404 403 io->io_bio->bi_opf |= REQ_SYNC; 405 - submit_bio(io->io_bio); 404 + blk_crypto_submit_bio(io->io_bio); 406 405 } 407 406 io->io_bio = NULL; 408 407 }
+5 -4
fs/ext4/readpage.c
··· 36 36 #include <linux/bio.h> 37 37 #include <linux/fs.h> 38 38 #include <linux/buffer_head.h> 39 + #include <linux/blk-crypto.h> 39 40 #include <linux/blkdev.h> 40 41 #include <linux/highmem.h> 41 42 #include <linux/prefetch.h> ··· 346 345 if (bio && (last_block_in_bio != first_block - 1 || 347 346 !fscrypt_mergeable_bio(bio, inode, next_block))) { 348 347 submit_and_realloc: 349 - submit_bio(bio); 348 + blk_crypto_submit_bio(bio); 350 349 bio = NULL; 351 350 } 352 351 if (bio == NULL) { ··· 372 371 if (((map.m_flags & EXT4_MAP_BOUNDARY) && 373 372 (relative_block == map.m_len)) || 374 373 (first_hole != blocks_per_folio)) { 375 - submit_bio(bio); 374 + blk_crypto_submit_bio(bio); 376 375 bio = NULL; 377 376 } else 378 377 last_block_in_bio = first_block + blocks_per_folio - 1; 379 378 continue; 380 379 confused: 381 380 if (bio) { 382 - submit_bio(bio); 381 + blk_crypto_submit_bio(bio); 383 382 bio = NULL; 384 383 } 385 384 if (!folio_test_uptodate(folio)) ··· 390 389 ; /* A label shall be followed by a statement until C23 */ 391 390 } 392 391 if (bio) 393 - submit_bio(bio); 392 + blk_crypto_submit_bio(bio); 394 393 return 0; 395 394 } 396 395
+2 -2
fs/f2fs/data.c
··· 513 513 trace_f2fs_submit_read_bio(sbi->sb, type, bio); 514 514 515 515 iostat_update_submit_ctx(bio, type); 516 - submit_bio(bio); 516 + blk_crypto_submit_bio(bio); 517 517 } 518 518 519 519 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio, ··· 522 522 WARN_ON_ONCE(is_read_io(bio_op(bio))); 523 523 trace_f2fs_submit_write_bio(sbi->sb, type, bio); 524 524 iostat_update_submit_ctx(bio, type); 525 - submit_bio(bio); 525 + blk_crypto_submit_bio(bio); 526 526 } 527 527 528 528 static void __submit_merged_bio(struct f2fs_bio_info *io)
+2 -1
fs/f2fs/file.c
··· 5 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 6 * http://www.samsung.com/ 7 7 */ 8 + #include <linux/blk-crypto.h> 8 9 #include <linux/fs.h> 9 10 #include <linux/f2fs_fs.h> 10 11 #include <linux/stat.h> ··· 5047 5046 enum temp_type temp = f2fs_get_segment_temp(sbi, type); 5048 5047 5049 5048 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp); 5050 - submit_bio(bio); 5049 + blk_crypto_submit_bio(bio); 5051 5050 } 5052 5051 5053 5052 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
+2 -1
fs/iomap/direct-io.c
··· 3 3 * Copyright (C) 2010 Red Hat, Inc. 4 4 * Copyright (c) 2016-2025 Christoph Hellwig. 5 5 */ 6 + #include <linux/blk-crypto.h> 6 7 #include <linux/fscrypt.h> 7 8 #include <linux/pagemap.h> 8 9 #include <linux/iomap.h> ··· 75 74 dio->dops->submit_io(iter, bio, pos); 76 75 } else { 77 76 WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE); 78 - submit_bio(bio); 77 + blk_crypto_submit_bio(bio); 79 78 } 80 79 } 81 80
+22
include/linux/blk-crypto.h
··· 181 181 182 182 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */ 183 183 184 + bool __blk_crypto_submit_bio(struct bio *bio); 185 + 186 + /** 187 + * blk_crypto_submit_bio - Submit a bio that may have a crypto context 188 + * @bio: bio to submit 189 + * 190 + * If @bio has no crypto context, or the crypt context attached to @bio is 191 + * supported by the underlying device's inline encryption hardware, just submit 192 + * @bio. 193 + * 194 + * Otherwise, try to perform en/decryption for this bio by falling back to the 195 + * kernel crypto API. For encryption this means submitting newly allocated 196 + * bios for the encrypted payload while keeping back the source bio until they 197 + * complete, while for reads the decryption happens in-place by a hooked in 198 + * completion handler. 199 + */ 200 + static inline void blk_crypto_submit_bio(struct bio *bio) 201 + { 202 + if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio)) 203 + submit_bio(bio); 204 + } 205 + 184 206 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask); 185 207 /** 186 208 * bio_crypt_clone - clone bio encryption context