Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

blk-mq: move the DMA mapping code to a separate file

While working on the new DMA API I kept getting annoyed by how it was placed
right in the middle of the bio splitting code in blk-merge.c.
Split it out into a separate file.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250513071433.836797-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe
b0a41585 7ee4fa04

+136 -135 overall

block/Makefile (+2 -2)
@@ -5,8 +5,8 @@
 
 obj-y		:= bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
-			blk-merge.o blk-timeout.o \
-			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
+			blk-merge.o blk-timeout.o blk-lib.o blk-mq.o \
+			blk-mq-tag.o blk-mq-dma.o blk-stat.o \
 			blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
 			genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \
 			disk-events.o blk-ia-ranges.o early-lookup.o
block/blk-merge.c (-133)
@@ -7,7 +7,6 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-integrity.h>
-#include <linux/scatterlist.h>
 #include <linux/part_stat.h>
 #include <linux/blk-cgroup.h>
 
@@ -222,27 +223,6 @@
 	if (end > start)
 		return end - start;
 	return max_sectors & ~(lbs - 1);
-}
-
-/**
- * get_max_segment_size() - maximum number of bytes to add as a single segment
- * @lim: Request queue limits.
- * @paddr: address of the range to add
- * @len: maximum length available to add at @paddr
- *
- * Returns the maximum number of bytes of the range starting at @paddr that can
- * be added to a single segment.
- */
-static inline unsigned get_max_segment_size(const struct queue_limits *lim,
-		phys_addr_t paddr, unsigned int len)
-{
-	/*
-	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
-	 * after having calculated the minimum.
-	 */
-	return min_t(unsigned long, len,
-		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
-		    (unsigned long)lim->max_segment_size - 1) + 1);
 }
 
 /**
@@ -450,117 +472,6 @@
 			UINT_MAX, UINT_MAX);
 	return nr_phys_segs;
 }
-
-struct phys_vec {
-	phys_addr_t paddr;
-	u32 len;
-};
-
-static bool blk_map_iter_next(struct request *req,
-		struct req_iterator *iter, struct phys_vec *vec)
-{
-	unsigned int max_size;
-	struct bio_vec bv;
-
-	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		if (!iter->bio)
-			return false;
-		vec->paddr = bvec_phys(&req->special_vec);
-		vec->len = req->special_vec.bv_len;
-		iter->bio = NULL;
-		return true;
-	}
-
-	if (!iter->iter.bi_size)
-		return false;
-
-	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
-	vec->paddr = bvec_phys(&bv);
-	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
-	bv.bv_len = min(bv.bv_len, max_size);
-	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
-
-	/*
-	 * If we are entirely done with this bi_io_vec entry, check if the next
-	 * one could be merged into it. This typically happens when moving to
-	 * the next bio, but some callers also don't pack bvecs tight.
-	 */
-	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
-		struct bio_vec next;
-
-		if (!iter->iter.bi_size) {
-			if (!iter->bio->bi_next)
-				break;
-			iter->bio = iter->bio->bi_next;
-			iter->iter = iter->bio->bi_iter;
-		}
-
-		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
-		if (bv.bv_len + next.bv_len > max_size ||
-		    !biovec_phys_mergeable(req->q, &bv, &next))
-			break;
-
-		bv.bv_len += next.bv_len;
-		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
-	}
-
-	vec->len = bv.bv_len;
-	return true;
-}
-
-static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
-		struct scatterlist *sglist)
-{
-	if (!*sg)
-		return sglist;
-
-	/*
-	 * If the driver previously mapped a shorter list, we could see a
-	 * termination bit prematurely unless it fully inits the sg table
-	 * on each mapping. We KNOW that there must be more entries here
-	 * or the driver would be buggy, so force clear the termination bit
-	 * to avoid doing a full sg_init_table() in drivers for each command.
-	 */
-	sg_unmark_end(*sg);
-	return sg_next(*sg);
-}
-
-/*
- * Map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries.
- */
-int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
-		struct scatterlist **last_sg)
-{
-	struct req_iterator iter = {
-		.bio	= rq->bio,
-	};
-	struct phys_vec vec;
-	int nsegs = 0;
-
-	/* the internal flush request may not have bio attached */
-	if (iter.bio)
-		iter.iter = iter.bio->bi_iter;
-
-	while (blk_map_iter_next(rq, &iter, &vec)) {
-		*last_sg = blk_next_sg(last_sg, sglist);
-		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
-				offset_in_page(vec.paddr));
-		nsegs++;
-	}
-
-	if (*last_sg)
-		sg_mark_end(*last_sg);
-
-	/*
-	 * Something must have been wrong if the figured number of
-	 * segment is bigger than number of req's physical segments
-	 */
-	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
-
-	return nsegs;
-}
-EXPORT_SYMBOL(__blk_rq_map_sg);
 
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 						  sector_t offset)
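
The code being moved leans on biovec_phys_mergeable() to decide whether two
bvecs may share one hardware segment. As a rough standalone model of that
test (the kernel's real helper also accounts for integrity and P2P
constraints, so treat this as a sketch rather than the block layer's
implementation), mergeability comes down to physical contiguity plus staying
inside the queue's segment boundary window:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Simplified model: two ranges can share a segment only if they are
     * physically contiguous and the merged range does not cross the
     * device's segment boundary (first and last byte land in the same
     * boundary window).
     */
    static bool phys_mergeable(uint64_t seg_boundary_mask,
                               uint64_t addr1, uint32_t len1,
                               uint64_t addr2, uint32_t len2)
    {
            if (addr1 + len1 != addr2)      /* must be contiguous */
                    return false;
            return (addr1 | seg_boundary_mask) ==
                   ((addr2 + len2 - 1) | seg_boundary_mask);
    }

    int main(void)
    {
            uint64_t mask = 0xffff; /* 64 KiB segment boundary */

            /* contiguous pages inside one 64 KiB window: mergeable (1) */
            printf("%d\n", phys_mergeable(mask, 0x10000, 4096, 0x11000, 4096));
            /* contiguous but crossing the boundary: not mergeable (0) */
            printf("%d\n", phys_mergeable(mask, 0x1f000, 4096, 0x20000, 4096));
            /* a gap between the ranges: not mergeable (0) */
            printf("%d\n", phys_mergeable(mask, 0x10000, 4096, 0x12000, 4096));
            return 0;
    }

The second case is why blk_map_iter_next() caps bv.bv_len with
get_max_segment_size() before trying to merge: a merged run must never
straddle a boundary the device cannot cross.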
block/blk-mq-dma.c (new file, +113)
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "blk.h"
+
+struct phys_vec {
+	phys_addr_t paddr;
+	u32 len;
+};
+
+static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
+		struct phys_vec *vec)
+{
+	unsigned int max_size;
+	struct bio_vec bv;
+
+	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		if (!iter->bio)
+			return false;
+		vec->paddr = bvec_phys(&req->special_vec);
+		vec->len = req->special_vec.bv_len;
+		iter->bio = NULL;
+		return true;
+	}
+
+	if (!iter->iter.bi_size)
+		return false;
+
+	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+	vec->paddr = bvec_phys(&bv);
+	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
+	bv.bv_len = min(bv.bv_len, max_size);
+	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
+
+	/*
+	 * If we are entirely done with this bi_io_vec entry, check if the next
+	 * one could be merged into it. This typically happens when moving to
+	 * the next bio, but some callers also don't pack bvecs tight.
+	 */
+	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
+		struct bio_vec next;
+
+		if (!iter->iter.bi_size) {
+			if (!iter->bio->bi_next)
+				break;
+			iter->bio = iter->bio->bi_next;
+			iter->iter = iter->bio->bi_iter;
+		}
+
+		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+		if (bv.bv_len + next.bv_len > max_size ||
+		    !biovec_phys_mergeable(req->q, &bv, &next))
+			break;
+
+		bv.bv_len += next.bv_len;
+		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
+	}
+
+	vec->len = bv.bv_len;
+	return true;
+}
+
+static inline struct scatterlist *
+blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
+{
+	if (!*sg)
+		return sglist;
+
+	/*
+	 * If the driver previously mapped a shorter list, we could see a
+	 * termination bit prematurely unless it fully inits the sg table
+	 * on each mapping. We KNOW that there must be more entries here
+	 * or the driver would be buggy, so force clear the termination bit
+	 * to avoid doing a full sg_init_table() in drivers for each command.
+	 */
+	sg_unmark_end(*sg);
+	return sg_next(*sg);
+}
+
+/*
+ * Map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries.
+ */
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+		struct scatterlist **last_sg)
+{
+	struct req_iterator iter = {
+		.bio	= rq->bio,
+	};
+	struct phys_vec vec;
+	int nsegs = 0;
+
+	/* the internal flush request may not have bio attached */
+	if (iter.bio)
+		iter.iter = iter.bio->bi_iter;
+
+	while (blk_map_iter_next(rq, &iter, &vec)) {
+		*last_sg = blk_next_sg(last_sg, sglist);
+		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
+				offset_in_page(vec.paddr));
+		nsegs++;
+	}
+
+	if (*last_sg)
+		sg_mark_end(*last_sg);
+
+	/*
+	 * Something must have been wrong if the figured number of
+	 * segment is bigger than number of req's physical segments
+	 */
+	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
+
+	return nsegs;
+}
+EXPORT_SYMBOL(__blk_rq_map_sg);
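
For context, a minimal sketch of how a driver typically consumes the exported
helper: size a scatterlist for the request's physical segments, let
__blk_rq_map_sg() fill it, then DMA-map the result. The function name
mydrv_map_request and its error policy are hypothetical and not taken from
any specific driver; the block, scatterlist, and DMA-mapping calls are the
standard kernel APIs.

    #include <linux/blk-mq.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /*
     * Hypothetical queue_rq-side helper: build an sg table for a request
     * and DMA-map it. On any failure the table is freed again and the
     * request is retried later via BLK_STS_RESOURCE.
     */
    static blk_status_t mydrv_map_request(struct device *dma_dev,
                                          struct request *rq,
                                          struct sg_table *sgt)
    {
            struct scatterlist *last_sg = NULL;
            int nsegs;

            /* the sg table must hold at least rq->nr_phys_segments entries */
            if (sg_alloc_table(sgt, blk_rq_nr_phys_segments(rq), GFP_ATOMIC))
                    return BLK_STS_RESOURCE;

            /* walk the request's bios and fill the scatterlist */
            nsegs = __blk_rq_map_sg(rq, sgt->sgl, &last_sg);
            if (!nsegs)
                    goto out_free;

            /* hand the coalesced segments to the DMA/IOMMU layer */
            if (dma_map_sg(dma_dev, sgt->sgl, nsegs, rq_dma_dir(rq)) <= 0)
                    goto out_free;

            return BLK_STS_OK;

    out_free:
            sg_free_table(sgt);
            return BLK_STS_RESOURCE;
    }

The last_sg out-parameter is what lets callers append to a partially built
list across calls; a one-shot caller like this sketch simply starts it at
NULL and ignores it afterwards.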
block/blk.h (+21)
@@ -404,6 +404,27 @@
 	}
 }
 
+/**
+ * get_max_segment_size() - maximum number of bytes to add as a single segment
+ * @lim: Request queue limits.
+ * @paddr: address of the range to add
+ * @len: maximum length available to add at @paddr
+ *
+ * Returns the maximum number of bytes of the range starting at @paddr that can
+ * be added to a single segment.
+ */
+static inline unsigned get_max_segment_size(const struct queue_limits *lim,
+		phys_addr_t paddr, unsigned int len)
+{
+	/*
+	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
+	 * after having calculated the minimum.
+	 */
+	return min_t(unsigned long, len,
+		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
+		    (unsigned long)lim->max_segment_size - 1) + 1);
+}
+
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
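
The overflow comment in get_max_segment_size() is easier to see with concrete
numbers. Below is a userspace restatement of the same arithmetic, assuming a
64-bit unsigned long; the queue_limits fields and min_t() are mocked with
plain integers, so this demonstrates the calculation rather than reusing
kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors get_max_segment_size(): bytes addable at paddr, capped by len */
    static unsigned int max_seg(uint64_t seg_boundary_mask,
                                uint64_t max_segment_size,
                                uint64_t paddr, unsigned int len)
    {
            /* bytes to the boundary minus 1, and max segment size minus 1 */
            uint64_t boundary_left = seg_boundary_mask -
                                     (seg_boundary_mask & paddr);
            uint64_t seg_left = max_segment_size - 1;
            uint64_t m = boundary_left < seg_left ? boundary_left : seg_left;

            /*
             * Taking min() of the "- 1" forms and adding 1 afterwards is the
             * overflow dodge: with mask == UINT64_MAX and paddr == 0, the
             * naive "boundary_left + 1" would wrap to 0 before the min().
             */
            m += 1;
            return len < m ? len : (unsigned int)m;
    }

    int main(void)
    {
            /* unlimited boundary and segment size: limited only by len */
            printf("%u\n", max_seg(UINT64_MAX, UINT64_MAX, 0, 65536));
            /* 4 KiB boundary, starting 512 bytes into the window */
            printf("%u\n", max_seg(0xfff, UINT64_MAX, 512, 65536));
            /* 64 KiB max segment size caps a 1 MiB range */
            printf("%u\n", max_seg(UINT64_MAX, 65536, 0, 1 << 20));
            return 0;
    }

Expected output is 65536, 3584 (the 4096 - 512 bytes left before the
boundary), and 65536; the first case is exactly the mask = ULONG_MAX,
offset = 0 situation the kernel comment warns about.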