Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-6.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mikulas Patocka:

- fix checking for request-based stackable devices (dm-table)

- fix corrupt_bio_byte setup checks (dm-flakey)

- add support for resync w/o metadata devices (dm raid)

- small code simplification (dm, dm-mpath, dm-vdo, dm-raid)

- remove support for asynchronous hashes (dm-verity)

- close smatch warning (dm-zoned-target)

- update the documentation and enable inline-crypto passthrough
(dm-thin)

* tag 'for-6.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm: set DM_TARGET_PASSES_CRYPTO feature for dm-thin
dm-thin: update the documentation
dm-raid: do not include dm-core.h
vdo: omit need_resched() before cond_resched()
md: dm-zoned-target: Initialize return variable r to avoid uninitialized use
dm-verity: remove support for asynchronous hashes
dm-mpath: don't print the "loaded" message if registering fails
dm-mpath: make dm_unregister_path_selector return void
dm: ima: avoid extra calls to strlen()
dm: Simplify dm_io_complete()
dm: Remove unnecessary return in dm_zone_endio()
dm raid: add support for resync w/o metadata devices
dm-flakey: Fix corrupt_bio_byte setup checks
dm-table: fix checking for rq stackable devices

20 files changed, +110 -261
+8 -8
Documentation/admin-guide/device-mapper/thin-provisioning.rst
···
 
 As a guide, we suggest you calculate the number of bytes to use in the
 metadata device as 48 * $data_dev_size / $data_block_size but round it up
-to 2MB if the answer is smaller. If you're creating large numbers of
+to 2MiB if the answer is smaller. If you're creating large numbers of
 snapshots which are recording large amounts of change, you may find you
 need to increase this.
 
-The largest size supported is 16GB: If the device is larger,
+The largest size supported is 16GiB: If the device is larger,
 a warning will be issued and the excess space will not be used.
 
 Reloading a pool table
···
 
 $data_block_size gives the smallest unit of disk space that can be
 allocated at a time expressed in units of 512-byte sectors.
-$data_block_size must be between 128 (64KB) and 2097152 (1GB) and a
-multiple of 128 (64KB). $data_block_size cannot be changed after the
+$data_block_size must be between 128 (64KiB) and 2097152 (1GiB) and a
+multiple of 128 (64KiB). $data_block_size cannot be changed after the
 thin-pool is created. People primarily interested in thin provisioning
-may want to use a value such as 1024 (512KB). People doing lots of
-snapshotting may want a smaller value such as 128 (64KB). If you are
+may want to use a value such as 1024 (512KiB). People doing lots of
+snapshotting may want a smaller value such as 128 (64KiB). If you are
 not zeroing newly-allocated data, a larger $data_block_size in the
-region of 256000 (128MB) is suggested.
+region of 262144 (128MiB) is suggested.
 
 $low_water_mark is expressed in blocks of size $data_block_size. If
 free space on the data device drops below this level then a dm event
···
 error_if_no_space:
	Error IOs, instead of queueing, if no space.
 
-Data block size must be between 64KB (128 sectors) and 1GB
+Data block size must be between 64KiB (128 sectors) and 1GiB
 (2097152 sectors) inclusive.
 
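The sizing guidance in this hunk is simple arithmetic. As a rough illustration only (a standalone userspace sketch; the helper name and example sizes are invented, not kernel code), the suggested metadata size can be computed like this:

#include <stdint.h>
#include <stdio.h>

#define MIB (1024ULL * 1024)
#define GIB (1024 * MIB)

/* Suggested thin-pool metadata size per the documentation above.
 * Both device and block sizes are in 512-byte sectors, as on the
 * thin-pool target line; the result is in bytes. */
static uint64_t thin_metadata_size_bytes(uint64_t data_dev_sectors,
                                         uint64_t data_block_sectors)
{
        uint64_t bytes = 48 * data_dev_sectors / data_block_sectors;

        if (bytes < 2 * MIB)            /* round small answers up to 2MiB */
                bytes = 2 * MIB;
        if (bytes > 16 * GIB)           /* space beyond 16GiB is unused */
                bytes = 16 * GIB;
        return bytes;
}

int main(void)
{
        /* Example: 1TiB data device, 512KiB blocks (1024 sectors). */
        uint64_t sectors = 1024ULL * GIB / 512;

        printf("suggested metadata size: %llu MiB\n",
               (unsigned long long)(thin_metadata_size_bytes(sectors, 1024) / MIB));
        return 0;
}

For a 1TiB data device with 512KiB blocks this prints 96 MiB, comfortably above the 2MiB floor and below the 16GiB cap.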
+6 -3
drivers/md/dm-flakey.c
···
        }
 
        if (test_bit(DROP_WRITES, &fc->flags) &&
-           (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+           ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+            fc->random_write_corrupt)) {
                ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
                return -EINVAL;
 
        } else if (test_bit(ERROR_WRITES, &fc->flags) &&
-                  (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+                  ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+                   fc->random_write_corrupt)) {
                ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
                return -EINVAL;
        } else if (test_bit(ERROR_READS, &fc->flags) &&
-                  (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
+                  ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == READ) ||
+                   fc->random_read_corrupt)) {
                ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
                return -EINVAL;
        }
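The dm-flakey fix narrows the feature-conflict checks so that corrupt_bio_rw is only considered when corrupt_bio_byte was actually configured. A toy model of the corrected check (standalone C; the struct and WRITE constant are invented stand-ins, not the driver's real types):

#include <stdbool.h>
#include <stdio.h>

#define WRITE 1

struct cfg {
        bool drop_writes;
        unsigned corrupt_bio_byte;      /* 0 means "feature not requested" */
        unsigned corrupt_bio_rw;
        bool random_write_corrupt;
};

/* Mirrors the fixed condition: corrupt_bio_rw only conflicts with
 * drop_writes when corrupt_bio_byte is actually set. */
static bool conflicts(const struct cfg *c)
{
        return c->drop_writes &&
               ((c->corrupt_bio_byte && c->corrupt_bio_rw == WRITE) ||
                c->random_write_corrupt);
}

int main(void)
{
        /* drop_writes alone, corrupt_bio_byte unset: must be accepted. */
        struct cfg ok = { .drop_writes = true, .corrupt_bio_rw = WRITE };
        /* drop_writes plus WRITE-side byte corruption: still rejected. */
        struct cfg bad = { .drop_writes = true, .corrupt_bio_byte = 224,
                           .corrupt_bio_rw = WRITE };

        printf("ok config rejected: %d\n", conflicts(&ok));
        printf("bad config rejected: %d\n", conflicts(&bad));
        return 0;
}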
+19 -23
drivers/md/dm-ima.c
···
        /*
         * First retrieve the target metadata.
         */
-       scnprintf(target_metadata_buf, DM_IMA_TARGET_METADATA_BUF_LEN,
-                 "target_index=%d,target_begin=%llu,target_len=%llu,",
-                 i, ti->begin, ti->len);
-       target_metadata_buf_len = strlen(target_metadata_buf);
+       target_metadata_buf_len =
+               scnprintf(target_metadata_buf,
+                         DM_IMA_TARGET_METADATA_BUF_LEN,
+                         "target_index=%d,target_begin=%llu,target_len=%llu,",
+                         i, ti->begin, ti->len);
 
        /*
         * Then retrieve the actual target data.
···
        if (r)
                goto error;
 
-       scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
-                 "%sname=%s,uuid=%s;device_resume=no_data;",
-                 DM_IMA_VERSION_STR, dev_name, dev_uuid);
-       l = strlen(device_table_data);
-
+       l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+                     "%sname=%s,uuid=%s;device_resume=no_data;",
+                     DM_IMA_VERSION_STR, dev_name, dev_uuid);
        }
 
        capacity_len = strlen(capacity_str);
···
        if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
                goto error;
 
-       scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
-                 "%sname=%s,uuid=%s;device_remove=no_data;",
-                 DM_IMA_VERSION_STR, dev_name, dev_uuid);
-       l = strlen(device_table_data);
+       l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+                     "%sname=%s,uuid=%s;device_remove=no_data;",
+                     DM_IMA_VERSION_STR, dev_name, dev_uuid);
        }
 
        memcpy(device_table_data + l, remove_all_str, remove_all_len);
···
        if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
                goto error2;
 
-       scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
-                 "%sname=%s,uuid=%s;table_clear=no_data;",
-                 DM_IMA_VERSION_STR, dev_name, dev_uuid);
-       l = strlen(device_table_data);
+       l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+                     "%sname=%s,uuid=%s;table_clear=no_data;",
+                     DM_IMA_VERSION_STR, dev_name, dev_uuid);
        }
 
        capacity_len = strlen(capacity_str);
···
        char *old_device_data = NULL, *new_device_data = NULL, *combined_device_data = NULL;
        char *new_dev_name = NULL, *new_dev_uuid = NULL, *capacity_str = NULL;
        bool noio = true;
-       int r;
+       int r, len;
 
        if (dm_ima_alloc_and_copy_device_data(md, &new_device_data,
                                              md->ima.active_table.num_targets, noio))
···
        md->ima.active_table.device_metadata = new_device_data;
        md->ima.active_table.device_metadata_len = strlen(new_device_data);
 
-       scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
-                 "%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
-                 new_dev_name, new_dev_uuid, capacity_str);
+       len = scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
+                       "%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
+                       new_dev_name, new_dev_uuid, capacity_str);
 
-       dm_ima_measure_data("dm_device_rename", combined_device_data, strlen(combined_device_data),
-                           noio);
+       dm_ima_measure_data("dm_device_rename", combined_device_data, len, noio);
 
        goto exit;
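All of the dm-ima hunks apply the same micro-optimization: scnprintf() already returns the number of characters it wrote, so the follow-up strlen() (an extra pass over the buffer) is redundant. A rough userspace analogue, with the caveat that snprintf() reports the untruncated length and therefore needs clamping, unlike the kernel's scnprintf():

#include <stdio.h>
#include <string.h>

#define BUF_LEN 64

int main(void)
{
        char buf[BUF_LEN];
        size_t len;

        /* Before: format, then measure with a second pass over the string. */
        snprintf(buf, sizeof(buf), "name=%s,uuid=%s;", "vg0-lv0", "abc-123");
        len = strlen(buf);

        /* After: reuse the length the formatter already computed.
         * snprintf() returns the would-be (untruncated) length, so clamp it;
         * the kernel's scnprintf() returns the written length directly. */
        int n = snprintf(buf, sizeof(buf), "name=%s,uuid=%s;", "vg0-lv0", "abc-123");
        len = (n < 0) ? 0 : ((size_t)n >= sizeof(buf) ? sizeof(buf) - 1 : (size_t)n);

        printf("%zu bytes: %s\n", len, buf);
        return 0;
}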
+3 -5
drivers/md/dm-path-selector.c
···
 }
 EXPORT_SYMBOL_GPL(dm_register_path_selector);
 
-int dm_unregister_path_selector(struct path_selector_type *pst)
+void dm_unregister_path_selector(struct path_selector_type *pst)
 {
        struct ps_internal *psi;
 
        down_write(&_ps_lock);
 
        psi = __find_path_selector_type(pst->name);
-       if (!psi) {
+       if (WARN_ON(!psi)) {
                up_write(&_ps_lock);
-               return -EINVAL;
+               return;
        }
 
        list_del(&psi->list);
···
        up_write(&_ps_lock);
 
        kfree(psi);
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(dm_unregister_path_selector);
+1 -1
drivers/md/dm-path-selector.h
···
 int dm_register_path_selector(struct path_selector_type *type);
 
 /* Unregister a path selector */
-int dm_unregister_path_selector(struct path_selector_type *type);
+void dm_unregister_path_selector(struct path_selector_type *type);
 
 /* Returns a registered path selector type */
 struct path_selector_type *dm_get_path_selector(const char *name);
+4 -5
drivers/md/dm-ps-historical-service-time.c
···
 {
        int r = dm_register_path_selector(&hst_ps);
 
-       if (r < 0)
+       if (r < 0) {
                DMERR("register failed %d", r);
+               return r;
+       }
 
        DMINFO("version " HST_VERSION " loaded");
 
···
 
 static void __exit dm_hst_exit(void)
 {
-       int r = dm_unregister_path_selector(&hst_ps);
-
-       if (r < 0)
-               DMERR("unregister failed %d", r);
+       dm_unregister_path_selector(&hst_ps);
 }
 
 module_init(dm_hst_init);
+1 -4
drivers/md/dm-ps-io-affinity.c
···
 
 static void __exit dm_ioa_exit(void)
 {
-       int ret = dm_unregister_path_selector(&ioa_ps);
-
-       if (ret < 0)
-               DMERR("unregister failed %d", ret);
+       dm_unregister_path_selector(&ioa_ps);
 }
 
 module_init(dm_ioa_init);
+4 -5
drivers/md/dm-ps-queue-length.c
···
 {
        int r = dm_register_path_selector(&ql_ps);
 
-       if (r < 0)
+       if (r < 0) {
                DMERR("register failed %d", r);
+               return r;
+       }
 
        DMINFO("version " QL_VERSION " loaded");
 
···
 
 static void __exit dm_ql_exit(void)
 {
-       int r = dm_unregister_path_selector(&ql_ps);
-
-       if (r < 0)
-               DMERR("unregister failed %d", r);
+       dm_unregister_path_selector(&ql_ps);
 }
 
 module_init(dm_ql_init);
+4 -5
drivers/md/dm-ps-round-robin.c
···
 {
        int r = dm_register_path_selector(&rr_ps);
 
-       if (r < 0)
+       if (r < 0) {
                DMERR("register failed %d", r);
+               return r;
+       }
 
        DMINFO("version " RR_VERSION " loaded");
 
···
 
 static void __exit dm_rr_exit(void)
 {
-       int r = dm_unregister_path_selector(&rr_ps);
-
-       if (r < 0)
-               DMERR("unregister failed %d", r);
+       dm_unregister_path_selector(&rr_ps);
 }
 
 module_init(dm_rr_init);
+4 -5
drivers/md/dm-ps-service-time.c
···
 {
        int r = dm_register_path_selector(&st_ps);
 
-       if (r < 0)
+       if (r < 0) {
                DMERR("register failed %d", r);
+               return r;
+       }
 
        DMINFO("version " ST_VERSION " loaded");
 
···
 
 static void __exit dm_st_exit(void)
 {
-       int r = dm_unregister_path_selector(&st_ps);
-
-       if (r < 0)
-               DMERR("unregister failed %d", r);
+       dm_unregister_path_selector(&st_ps);
 }
 
 module_init(dm_st_init);
+5 -2
drivers/md/dm-raid.c
···
 #include "raid5.h"
 #include "raid10.h"
 #include "md-bitmap.h"
-#include "dm-core.h"
 
 #include <linux/device-mapper.h>
 
···
        struct md_rdev *rdev, *freshest;
        struct mddev *mddev = &rs->md;
 
+       /* Respect resynchronization requested with "sync" argument. */
+       if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
+               set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+
        freshest = NULL;
        rdev_for_each(rdev, mddev) {
                if (test_bit(Journal, &rdev->flags))
···
 
        /* Disable/enable discard support on raid set. */
        configure_discard_support(rs);
-       rs->md.dm_gendisk = ti->table->md->disk;
+       rs->md.dm_gendisk = dm_disk(dm_table_get_md(ti->table));
 
        mddev_unlock(&rs->md);
        return 0;
+5 -5
drivers/md/dm-table.c
···
        return true;
 }
 
-static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
-                                 sector_t start, sector_t len, void *data)
+static int device_is_not_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+                                     sector_t start, sector_t len, void *data)
 {
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
 
        /* request-based cannot stack on partitions! */
        if (bdev_is_partition(bdev))
-               return false;
+               return true;
 
-       return queue_is_mq(q);
+       return !queue_is_mq(q);
 }
 
 static int dm_table_determine_type(struct dm_table *t)
···
 
        /* Non-request-stackable devices can't be used for request-based dm */
        if (!ti->type->iterate_devices ||
-           !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
+           ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL)) {
                DMERR("table load rejected: including non-request-stackable devices");
                return -EINVAL;
        }
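The dm-table fix hinges on iterate_devices() answering "did the callback hold for any device?": a table must be rejected as soon as any device fails the request-stackable requirement, not only when no device passes it. A toy sketch of that inversion (illustrative names and data, not the dm API):

#include <stdbool.h>
#include <stdio.h>

typedef bool (*dev_pred)(int queue_kind);

/* Stand-in for iterate_devices(): true if the predicate held for ANY device. */
static bool any_device(const int *queues, int n, dev_pred p)
{
        for (int i = 0; i < n; i++)
                if (p(queues[i]))
                        return true;
        return false;
}

static bool is_mq(int q)     { return q == 1; }
static bool is_not_mq(int q) { return !is_mq(q); }

int main(void)
{
        int queues[] = { 1, 1, 0 };     /* one non-blk-mq device in the table */
        int n = sizeof(queues) / sizeof(queues[0]);

        /* Old-style check, "not any device is stackable": wrongly accepts this mix. */
        printf("old check rejects: %d\n", !any_device(queues, n, is_mq));

        /* Fixed check: reject as soon as ANY device fails the requirement. */
        printf("new check rejects: %d\n", any_device(queues, n, is_not_mq));
        return 0;
}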
+4 -3
drivers/md/dm-thin.c
···
 static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
-                   DM_TARGET_IMMUTABLE,
-       .version = {1, 23, 0},
+                   DM_TARGET_IMMUTABLE | DM_TARGET_PASSES_CRYPTO,
+       .version = {1, 24, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
···
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 23, 0},
+       .features = DM_TARGET_PASSES_CRYPTO,
+       .version = {1, 24, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
+1 -2
drivers/md/dm-vdo/funnel-workqueue.c
···
                 * This speeds up some performance tests; that "other work" might include other VDO
                 * threads.
                 */
-               if (need_resched())
-                       cond_resched();
+               cond_resched();
        }
 
        run_finish_hook(queue);
+2 -2
drivers/md/dm-verity-fec.c
···
                           u8 *want_digest, u8 *data)
 {
        if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits,
-                                verity_io_real_digest(v, io), true)))
+                                verity_io_real_digest(v, io))))
                return 0;
 
        return memcmp(verity_io_real_digest(v, io), want_digest,
···
 
        /* Always re-validate the corrected block against the expected hash */
        r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits,
-                       verity_io_real_digest(v, io), true);
+                       verity_io_real_digest(v, io));
        if (unlikely(r < 0))
                return r;
 
+29 -156
drivers/md/dm-verity-target.c
···
 #include "dm-audit.h"
 #include <linux/module.h>
 #include <linux/reboot.h>
-#include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <linux/jump_label.h>
 #include <linux/security.h>
···
 module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644);
 
 static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
-
-/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
-static DEFINE_STATIC_KEY_FALSE(ahash_enabled);
 
 struct dm_verity_prefetch_work {
        struct work_struct work;
···
        return block >> (level * v->hash_per_block_bits);
 }
 
-static int verity_ahash_update(struct dm_verity *v, struct ahash_request *req,
-                              const u8 *data, size_t len,
-                              struct crypto_wait *wait)
-{
-       struct scatterlist sg;
-
-       if (likely(!is_vmalloc_addr(data))) {
-               sg_init_one(&sg, data, len);
-               ahash_request_set_crypt(req, &sg, NULL, len);
-               return crypto_wait_req(crypto_ahash_update(req), wait);
-       }
-
-       do {
-               int r;
-               size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
-
-               flush_kernel_vmap_range((void *)data, this_step);
-               sg_init_table(&sg, 1);
-               sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
-               ahash_request_set_crypt(req, &sg, NULL, this_step);
-               r = crypto_wait_req(crypto_ahash_update(req), wait);
-               if (unlikely(r))
-                       return r;
-               data += this_step;
-               len -= this_step;
-       } while (len);
-
-       return 0;
-}
-
-/*
- * Wrapper for crypto_ahash_init, which handles verity salting.
- */
-static int verity_ahash_init(struct dm_verity *v, struct ahash_request *req,
-                            struct crypto_wait *wait, bool may_sleep)
-{
-       int r;
-
-       ahash_request_set_tfm(req, v->ahash_tfm);
-       ahash_request_set_callback(req,
-               may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
-               crypto_req_done, (void *)wait);
-       crypto_init_wait(wait);
-
-       r = crypto_wait_req(crypto_ahash_init(req), wait);
-
-       if (unlikely(r < 0)) {
-               if (r != -ENOMEM)
-                       DMERR("crypto_ahash_init failed: %d", r);
-               return r;
-       }
-
-       if (likely(v->salt_size && (v->version >= 1)))
-               r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
-       return r;
-}
-
-static int verity_ahash_final(struct dm_verity *v, struct ahash_request *req,
-                             u8 *digest, struct crypto_wait *wait)
-{
-       int r;
-
-       if (unlikely(v->salt_size && (!v->version))) {
-               r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
-               if (r < 0) {
-                       DMERR("%s failed updating salt: %d", __func__, r);
-                       goto out;
-               }
-       }
-
-       ahash_request_set_crypt(req, NULL, digest, 0);
-       r = crypto_wait_req(crypto_ahash_final(req), wait);
-out:
-       return r;
-}
-
 int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
-               const u8 *data, size_t len, u8 *digest, bool may_sleep)
+               const u8 *data, size_t len, u8 *digest)
 {
+       struct shash_desc *desc = &io->hash_desc;
        int r;
 
-       if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) {
-               struct ahash_request *req = verity_io_hash_req(v, io);
-               struct crypto_wait wait;
-
-               r = verity_ahash_init(v, req, &wait, may_sleep) ?:
-                   verity_ahash_update(v, req, data, len, &wait) ?:
-                   verity_ahash_final(v, req, digest, &wait);
+       desc->tfm = v->shash_tfm;
+       if (unlikely(v->initial_hashstate == NULL)) {
+               /* Version 0: salt at end */
+               r = crypto_shash_init(desc) ?:
+                   crypto_shash_update(desc, data, len) ?:
+                   crypto_shash_update(desc, v->salt, v->salt_size) ?:
+                   crypto_shash_final(desc, digest);
        } else {
-               struct shash_desc *desc = verity_io_hash_req(v, io);
-
-               desc->tfm = v->shash_tfm;
+               /* Version 1: salt at beginning */
                r = crypto_shash_import(desc, v->initial_hashstate) ?:
                    crypto_shash_finup(desc, data, len, digest);
        }
···
        }
 
        r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
-                       verity_io_real_digest(v, io), !io->in_bh);
+                       verity_io_real_digest(v, io));
        if (unlikely(r < 0))
                goto release_ret_r;
 
···
                goto free_ret;
 
        r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
-                       verity_io_real_digest(v, io), true);
+                       verity_io_real_digest(v, io));
        if (unlikely(r))
                goto free_ret;
 
···
                }
 
                r = verity_hash(v, io, data, block_size,
-                               verity_io_real_digest(v, io), !io->in_bh);
+                               verity_io_real_digest(v, io));
                if (unlikely(r < 0)) {
                        kunmap_local(data);
                        return r;
···
        kfree(v->zero_digest);
        verity_free_sig(v);
 
-       if (v->ahash_tfm) {
-               static_branch_dec(&ahash_enabled);
-               crypto_free_ahash(v->ahash_tfm);
-       } else {
-               crypto_free_shash(v->shash_tfm);
-       }
+       crypto_free_shash(v->shash_tfm);
 
        kfree(v->alg_name);
 
···
        if (!v->zero_digest)
                return r;
 
-       io = kmalloc(sizeof(*io) + v->hash_reqsize, GFP_KERNEL);
+       io = kmalloc(sizeof(*io) + crypto_shash_descsize(v->shash_tfm),
+                    GFP_KERNEL);
 
        if (!io)
                return r;       /* verity_dtr will free zero_digest */
···
                goto out;
 
        r = verity_hash(v, io, zero_data, 1 << v->data_dev_block_bits,
-                       v->zero_digest, true);
+                       v->zero_digest);
 
 out:
        kfree(io);
···
 static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
 {
        struct dm_target *ti = v->ti;
-       struct crypto_ahash *ahash;
-       struct crypto_shash *shash = NULL;
-       const char *driver_name;
+       struct crypto_shash *shash;
 
        v->alg_name = kstrdup(alg_name, GFP_KERNEL);
        if (!v->alg_name) {
···
                return -ENOMEM;
        }
 
-       /*
-        * Allocate the hash transformation object that this dm-verity instance
-        * will use. The vast majority of dm-verity users use CPU-based
-        * hashing, so when possible use the shash API to minimize the crypto
-        * API overhead. If the ahash API resolves to a different driver
-        * (likely an off-CPU hardware offload), use ahash instead. Also use
-        * ahash if the obsolete dm-verity format with the appended salt is
-        * being used, so that quirk only needs to be handled in one place.
-        */
-       ahash = crypto_alloc_ahash(alg_name, 0,
-                                  v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
-       if (IS_ERR(ahash)) {
+       shash = crypto_alloc_shash(alg_name, 0, 0);
+       if (IS_ERR(shash)) {
                ti->error = "Cannot initialize hash function";
-               return PTR_ERR(ahash);
+               return PTR_ERR(shash);
        }
-       driver_name = crypto_ahash_driver_name(ahash);
-       if (v->version >= 1 /* salt prepended, not appended? */) {
-               shash = crypto_alloc_shash(alg_name, 0, 0);
-               if (!IS_ERR(shash) &&
-                   strcmp(crypto_shash_driver_name(shash), driver_name) != 0) {
-                       /*
-                        * ahash gave a different driver than shash, so probably
-                        * this is a case of real hardware offload. Use ahash.
-                        */
-                       crypto_free_shash(shash);
-                       shash = NULL;
-               }
-       }
-       if (!IS_ERR_OR_NULL(shash)) {
-               crypto_free_ahash(ahash);
-               ahash = NULL;
-               v->shash_tfm = shash;
-               v->digest_size = crypto_shash_digestsize(shash);
-               v->hash_reqsize = sizeof(struct shash_desc) +
-                                 crypto_shash_descsize(shash);
-               DMINFO("%s using shash \"%s\"", alg_name, driver_name);
-       } else {
-               v->ahash_tfm = ahash;
-               static_branch_inc(&ahash_enabled);
-               v->digest_size = crypto_ahash_digestsize(ahash);
-               v->hash_reqsize = sizeof(struct ahash_request) +
-                                 crypto_ahash_reqsize(ahash);
-               DMINFO("%s using ahash \"%s\"", alg_name, driver_name);
-       }
+       v->shash_tfm = shash;
+       v->digest_size = crypto_shash_digestsize(shash);
+       DMINFO("%s using \"%s\"", alg_name, crypto_shash_driver_name(shash));
        if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
                ti->error = "Digest size too big";
                return -EINVAL;
···
                        return -EINVAL;
                }
        }
-       if (v->shash_tfm) {
+       if (v->version) { /* Version 1: salt at beginning */
                SHASH_DESC_ON_STACK(desc, v->shash_tfm);
                int r;
 
···
                goto bad;
        }
 
-       ti->per_io_data_size = sizeof(struct dm_verity_io) + v->hash_reqsize;
+       ti->per_io_data_size = sizeof(struct dm_verity_io) +
+                              crypto_shash_descsize(v->shash_tfm);
 
        r = verity_fec_ctr(v);
        if (r)
···
        bdev = dm_disk(dm_table_get_md(ti->table))->part0;
        root_digest.digest = v->root_digest;
        root_digest.digest_len = v->digest_size;
-       if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm)
-               root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm);
-       else
-               root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
+       root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
 
        r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
                                       sizeof(root_digest));
···
        .name = "verity",
        /* Note: the LSMs depend on the singleton and immutable features */
        .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
-       .version = {1, 11, 0},
+       .version = {1, 12, 0},
        .module = THIS_MODULE,
        .ctr = verity_ctr,
        .dtr = verity_dtr,
+7 -15
drivers/md/dm-verity.h
···
        struct dm_target *ti;
        struct dm_bufio_client *bufio;
        char *alg_name;
-       struct crypto_ahash *ahash_tfm; /* either this or shash_tfm is set */
-       struct crypto_shash *shash_tfm; /* either this or ahash_tfm is set */
+       struct crypto_shash *shash_tfm;
        u8 *root_digest;        /* digest of the root block */
        u8 *salt;               /* salt: its size is salt_size */
-       u8 *initial_hashstate;  /* salted initial state, if shash_tfm is set */
+       u8 *initial_hashstate;  /* salted initial state, if version >= 1 */
        u8 *zero_digest;        /* digest for a zero block */
 #ifdef CONFIG_SECURITY
        u8 *root_digest_sig;    /* signature of the root digest */
···
        bool hash_failed:1;     /* set if hash of any block failed */
        bool use_bh_wq:1;       /* try to verify in BH wq before normal work-queue */
        unsigned int digest_size;       /* digest size for the current hash algorithm */
-       unsigned int hash_reqsize; /* the size of temporary space for crypto */
        enum verity_mode mode;  /* mode for handling verification errors */
        enum verity_mode error_mode;/* mode for handling I/O errors */
        unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
···
        u8 want_digest[HASH_MAX_DIGESTSIZE];
 
        /*
-        * This struct is followed by a variable-sized hash request of size
-        * v->hash_reqsize, either a struct ahash_request or a struct shash_desc
-        * (depending on whether ahash_tfm or shash_tfm is being used). To
-        * access it, use verity_io_hash_req().
+        * Temporary space for hashing. This is variable-length and must be at
+        * the end of the struct. struct shash_desc is just the fixed part;
+        * it's followed by a context of size crypto_shash_descsize(shash_tfm).
         */
+       struct shash_desc hash_desc;
 };
-
-static inline void *verity_io_hash_req(struct dm_verity *v,
-                                      struct dm_verity_io *io)
-{
-       return io + 1;
-}
 
 static inline u8 *verity_io_real_digest(struct dm_verity *v,
                                         struct dm_verity_io *io)
···
 }
 
 extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
-                      const u8 *data, size_t len, u8 *digest, bool may_sleep);
+                      const u8 *data, size_t len, u8 *digest);
 
 extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
                                 sector_t block, u8 *digest, bool *is_zero);
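The new hash_desc member relies on a "fixed header plus variable tail" layout: the per-io structure is allocated with crypto_shash_descsize() extra bytes, and the algorithm's state lives immediately after the struct. A simplified standalone model of that layout (illustrative names only, not the kernel crypto API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The per-io struct ends with a fixed descriptor; the allocation reserves
 * extra bytes right behind it for the hash algorithm's variable-size state. */
struct io {
        unsigned long block;
        const char *alg;        /* stands in for the fixed part of hash_desc */
        /* variable-size hash context follows the struct in memory */
};

static unsigned char *io_hash_ctx(struct io *io)
{
        return (unsigned char *)(io + 1);       /* first byte past the struct */
}

int main(void)
{
        size_t ctx_size = 104;  /* stand-in for crypto_shash_descsize(tfm) */
        struct io *io = malloc(sizeof(*io) + ctx_size);

        if (!io)
                return 1;
        io->block = 7;
        io->alg = "sha256";
        memset(io_hash_ctx(io), 0, ctx_size);   /* tail lands after the struct */
        printf("per-io allocation: %zu bytes\n", sizeof(*io) + ctx_size);
        free(io);
        return 0;
}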
-2
drivers/md/dm-zone.c
···
                        bdev_offset_from_zone_start(disk->part0,
                                                    clone->bi_iter.bi_sector);
        }
-
-       return;
 }
 
 static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
+1 -1
drivers/md/dm-zoned-target.c
···
        struct dmz_target *dmz = ti->private;
        unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
        sector_t capacity;
-       int i, r;
+       int i, r = 0;
 
        for (i = 0; i < dmz->nr_ddevs; i++) {
                capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+2 -9
drivers/md/dm.c
···
  *
  * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
  */
-static void dm_io_complete(struct dm_io *io)
+static inline void dm_io_complete(struct dm_io *io)
 {
-       bool first_requeue;
-
        /*
         * Only dm_io that has been split needs two stage requeue, otherwise
         * we may run into long bio clone chain during suspend and OOM could
···
         * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
         * also aren't handled via the first stage requeue.
         */
-       if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
-               first_requeue = true;
-       else
-               first_requeue = false;
-
-       __dm_io_complete(io, first_requeue);
+       __dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
 }
 
 /*