Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
"Main thing here is the fix for the regression in flush handling which
caused IO hangs/stalls for a few reporters. Hopefully that should all
be sorted out now. Outside of that, just a few minor fixes for issues
that were introduced in this cycle"

* tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux:
blk-mq: release scheduler resource when request completes
blk-crypto: dynamically allocate fallback profile
blk-cgroup: hold queue_lock when removing blkg->q_node
drivers/rnbd: restore sysfs interface to rnbd-client

+49 -17
+2
block/blk-cgroup.c
···
 		blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
+	spin_lock_irq(&q->queue_lock);
 	list_del_init(&blkg->q_node);
+	spin_unlock_irq(&q->queue_lock);
 	mutex_unlock(&q->blkcg_mutex);
 
 	blk_put_queue(q);
+23 -13
block/blk-crypto-fallback.c
···
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
 
-static struct blk_crypto_profile blk_crypto_fallback_profile;
+static struct blk_crypto_profile *blk_crypto_fallback_profile;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
 static struct bio_set crypto_bio_split;
···
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		src_bio->bi_status = blk_st;
···
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		bio->bi_status = blk_st;
···
 		return false;
 	}
 
-	if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
+	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
 					&bc->bc_key->crypto_cfg)) {
 		bio->bi_status = BLK_STS_NOTSUPP;
 		return false;
···
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 {
-	return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
+	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
 }
 
 static bool blk_crypto_fallback_inited;
···
 {
 	int i;
 	int err;
-	struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
 
 	if (blk_crypto_fallback_inited)
 		return 0;
···
 	if (err)
 		goto out;
 
-	err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
-	if (err)
+	/* Dynamic allocation is needed because of lockdep_register_key(). */
+	blk_crypto_fallback_profile =
+		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+	if (!blk_crypto_fallback_profile) {
+		err = -ENOMEM;
 		goto fail_free_bioset;
+	}
+
+	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
+				      blk_crypto_num_keyslots);
+	if (err)
+		goto fail_free_profile;
 	err = -ENOMEM;
 
-	profile->ll_ops = blk_crypto_fallback_ll_ops;
-	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
+	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
 
 	/* All blk-crypto modes have a crypto API fallback. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-		profile->modes_supported[i] = 0xFFFFFFFF;
-	profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
+	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
 
 	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
 					WQ_UNBOUND | WQ_HIGHPRI |
···
 fail_free_wq:
 	destroy_workqueue(blk_crypto_wq);
 fail_destroy_profile:
-	blk_crypto_profile_destroy(profile);
+	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
+fail_free_profile:
+	kfree(blk_crypto_fallback_profile);
 fail_free_bioset:
 	bioset_exit(&crypto_bio_split);
 out:
+20 -3
block/blk-mq.c
···
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+static void blk_mq_finish_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		q->elevator->type->ops.finish_request(rq);
+		/*
+		 * For postflush request that may need to be
+		 * completed twice, we should clear this flag
+		 * to avoid double finish_request() on the rq.
+		 */
+		rq->rq_flags &= ~RQF_USE_SCHED;
+	}
+}
+
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
···
 {
 	struct request_queue *q = rq->q;
 
-	if ((rq->rq_flags & RQF_USE_SCHED) &&
-	    q->elevator->type->ops.finish_request)
-		q->elevator->type->ops.finish_request(rq);
+	blk_mq_finish_request(rq);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->disk->bdi);
···
 	if (blk_mq_need_time_stamp(rq))
 		__blk_mq_end_request_acct(rq, ktime_get_ns());
 
+	blk_mq_finish_request(rq);
+
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
 		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
···
 		blk_complete_request(rq);
 		if (iob->need_ts)
 			__blk_mq_end_request_acct(rq, now);
+
+		blk_mq_finish_request(rq);
 
 		rq_qos_done(rq->q, rq);
 
+3
block/elevator.c
···
 
 int elv_register(struct elevator_type *e)
 {
+	/* finish request is mandatory */
+	if (WARN_ON_ONCE(!e->ops.finish_request))
+		return -EINVAL;
 	/* insert_requests and dispatch_request are mandatory */
 	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
 		return -EINVAL;
+1 -1
drivers/block/rnbd/rnbd-clt-sysfs.c
···
 
 static struct device *rnbd_dev;
 static const struct class rnbd_dev_class = {
-	.name = "rnbd_client",
+	.name = "rnbd-client",
 };
 static struct kobject *rnbd_devs_kobj;
 