Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

blk-mq-sched: add new parameter nr_requests in blk_mq_alloc_sched_tags()

This helper only supports allocating the default number of requests;
add a new parameter to support a specific number of requests.

Prepare to fix a potential deadlock in the case where nr_requests grows.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Yu Kuai and committed by
Jens Axboe
6293e336 e6320040

+19 -11
+5 -9
block/blk-mq-sched.c
··· 454 454 } 455 455 456 456 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set, 457 - unsigned int nr_hw_queues) 457 + unsigned int nr_hw_queues, unsigned int nr_requests) 458 458 { 459 459 unsigned int nr_tags; 460 460 int i; ··· 470 470 nr_tags * sizeof(struct blk_mq_tags *), gfp); 471 471 if (!et) 472 472 return NULL; 473 - /* 474 - * Default to double of smaller one between hw queue_depth and 475 - * 128, since we don't split into sync/async like the old code 476 - * did. Additionally, this is a per-hw queue depth. 477 - */ 478 - et->nr_requests = 2 * min_t(unsigned int, set->queue_depth, 479 - BLKDEV_DEFAULT_RQ); 473 + 474 + et->nr_requests = nr_requests; 480 475 et->nr_hw_queues = nr_hw_queues; 481 476 482 477 if (blk_mq_is_shared_tags(set->flags)) { ··· 516 521 * concurrently. 517 522 */ 518 523 if (q->elevator) { 519 - et = blk_mq_alloc_sched_tags(set, nr_hw_queues); 524 + et = blk_mq_alloc_sched_tags(set, nr_hw_queues, 525 + blk_mq_default_nr_requests(set)); 520 526 if (!et) 521 527 goto out_unwind; 522 528 if (xa_insert(et_table, q->id, et, gfp))
+1 -1
block/blk-mq-sched.h
··· 24 24 void blk_mq_sched_free_rqs(struct request_queue *q); 25 25 26 26 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set, 27 - unsigned int nr_hw_queues); 27 + unsigned int nr_hw_queues, unsigned int nr_requests); 28 28 int blk_mq_alloc_sched_tags_batch(struct xarray *et_table, 29 29 struct blk_mq_tag_set *set, unsigned int nr_hw_queues); 30 30 void blk_mq_free_sched_tags(struct elevator_tags *et,
+11
block/blk-mq.h
··· 110 110 } 111 111 112 112 /* 113 + * Default to double of smaller one between hw queue_depth and 114 + * 128, since we don't split into sync/async like the old code 115 + * did. Additionally, this is a per-hw queue depth. 116 + */ 117 + static inline unsigned int blk_mq_default_nr_requests( 118 + struct blk_mq_tag_set *set) 119 + { 120 + return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ); 121 + } 122 + 123 + /* 113 124 * sysfs helpers 114 125 */ 115 126 extern void blk_mq_sysfs_init(struct request_queue *q);
+2 -1
block/elevator.c
··· 669 669 lockdep_assert_held(&set->update_nr_hwq_lock); 670 670 671 671 if (strncmp(ctx->name, "none", 4)) { 672 - ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues); 672 + ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues, 673 + blk_mq_default_nr_requests(set)); 673 674 if (!ctx->et) 674 675 return -ENOMEM; 675 676 }