Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

blk-mq: add a new queue sysfs attribute async_depth

Add a new field, async_depth, to request_queue and related APIs. It is
currently unused; following patches will convert elevators to use it
instead of their internal async_depth.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Yu Kuai; committed by Jens Axboe.
f98afe4f cf02d7d4

+51
+1
block/blk-core.c
···
 463   463       fs_reclaim_release(GFP_KERNEL);
 464   464
 465   465       q->nr_requests = BLKDEV_DEFAULT_RQ;
       466   +   q->async_depth = BLKDEV_DEFAULT_RQ;
 466   467
 467   468       return q;
 468   469
+6
block/blk-mq.c
···
 4662  4662      spin_lock_init(&q->requeue_lock);
 4663  4663
 4664  4664      q->nr_requests = set->queue_depth;
       4665  +   q->async_depth = set->queue_depth;
 4665  4666
 4666  4667      blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 4667  4668      blk_mq_map_swqueue(q);
···
 5029  5028          q->elevator->et = et;
 5030  5029      }
 5031  5030
       5031  +   /*
       5032  +    * Preserve relative value, both nr and async_depth are at most 16 bit
       5033  +    * value, no need to worry about overflow.
       5034  +    */
       5035  +   q->async_depth = max(q->async_depth * nr / q->nr_requests, 1);
 5032  5036      q->nr_requests = nr;
 5033  5037      if (q->elevator && q->elevator->type->ops.depth_updated)
 5034  5038          q->elevator->type->ops.depth_updated(q);
+42
block/blk-sysfs.c
···
 127   127       return ret;
 128   128   }
 129   129
       130   + static ssize_t queue_async_depth_show(struct gendisk *disk, char *page)
       131   + {
       132   +     guard(mutex)(&disk->queue->elevator_lock);
       133   +
       134   +     return queue_var_show(disk->queue->async_depth, page);
       135   + }
       136   +
       137   + static ssize_t
       138   + queue_async_depth_store(struct gendisk *disk, const char *page, size_t count)
       139   + {
       140   +     struct request_queue *q = disk->queue;
       141   +     unsigned int memflags;
       142   +     unsigned long nr;
       143   +     int ret;
       144   +
       145   +     if (!queue_is_mq(q))
       146   +         return -EINVAL;
       147   +
       148   +     ret = queue_var_store(&nr, page, count);
       149   +     if (ret < 0)
       150   +         return ret;
       151   +
       152   +     if (nr == 0)
       153   +         return -EINVAL;
       154   +
       155   +     memflags = blk_mq_freeze_queue(q);
       156   +     scoped_guard(mutex, &q->elevator_lock) {
       157   +         if (q->elevator) {
       158   +             q->async_depth = min(q->nr_requests, nr);
       159   +             if (q->elevator->type->ops.depth_updated)
       160   +                 q->elevator->type->ops.depth_updated(q);
       161   +         } else {
       162   +             ret = -EINVAL;
       163   +         }
       164   +     }
       165   +     blk_mq_unfreeze_queue(q, memflags);
       166   +
       167   +     return ret;
       168   + }
       169   +
 130   170   static ssize_t queue_ra_show(struct gendisk *disk, char *page)
 131   171   {
 132   172       ssize_t ret;
···
 572   532   }
 573   533
 574   534   QUEUE_RW_ENTRY(queue_requests, "nr_requests");
       535   + QUEUE_RW_ENTRY(queue_async_depth, "async_depth");
 575   536   QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
 576   537   QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
 577   538   QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
···
 760   719    */
 761   720   &elv_iosched_entry.attr,
 762   721   &queue_requests_entry.attr,
       722   + &queue_async_depth_entry.attr,
 763   723   #ifdef CONFIG_BLK_WBT
 764   724   &queue_wb_lat_entry.attr,
 765   725   #endif
+1
block/elevator.c
···
 589   589       blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 590   590       q->elevator = NULL;
 591   591       q->nr_requests = q->tag_set->queue_depth;
       592   +   q->async_depth = q->tag_set->queue_depth;
 592   593   }
 593   594   blk_add_trace_msg(q, "elv switch: %s", ctx->name);
 594   595
+1
include/linux/blkdev.h
···
 551   551    * queue settings
 552   552    */
 553   553   unsigned int nr_requests;    /* Max # of requests */
       554   + unsigned int async_depth;   /* Max # of async requests */
 554   555
 555   556   #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 556   557   struct blk_crypto_profile *crypto_profile;