Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

blk-mq-dma: bring back p2p request flags

We only need to consider data and metadata dma mapping types separately.
The request and bio integrity payload have enough flag bits to
internally track the mapping type for each. Use these so the caller
doesn't need to track them, and provide separate request and integrity
helpers to the common code. This will make it easier to scale new
mappings, like the proposed MMIO attribute, without burdening the caller
to track such things.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Keith Busch and committed by
Jens Axboe
d57447ff 05ceea5d

+35 -19
+4
block/blk-mq-dma.c
··· 174 174 switch (pci_p2pdma_state(&iter->p2pdma, dma_dev, 175 175 phys_to_page(vec.paddr))) { 176 176 case PCI_P2PDMA_MAP_BUS_ADDR: 177 + if (iter->iter.is_integrity) 178 + bio_integrity(req->bio)->bip_flags |= BIP_P2P_DMA; 179 + else 180 + req->cmd_flags |= REQ_P2PDMA; 177 181 return blk_dma_map_bus(iter, &vec); 178 182 case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: 179 183 /*
+4 -17
drivers/nvme/host/pci.c
··· 260 260 /* single segment dma mapping */ 261 261 IOD_SINGLE_SEGMENT = 1U << 2, 262 262 263 - /* DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */ 264 - IOD_P2P_BUS_ADDR = 1U << 3, 265 - 266 - /* Metadata DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */ 267 - IOD_META_P2P_BUS_ADDR = 1U << 4, 268 - 269 263 /* Metadata using non-coalesced MPTR */ 270 264 IOD_SINGLE_META_SEGMENT = 1U << 5, 271 265 }; ··· 731 737 return; 732 738 } 733 739 734 - if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state, 735 - iod->meta_total_len, 736 - iod->flags & IOD_META_P2P_BUS_ADDR)) { 740 + if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state, 741 + iod->meta_total_len)) { 737 742 if (nvme_pci_cmd_use_meta_sgl(&iod->cmd)) 738 743 nvme_free_sgls(req, sge, &sge[1]); 739 744 else ··· 759 766 return; 760 767 } 761 768 762 - if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len, 763 - iod->flags & IOD_P2P_BUS_ADDR)) { 769 + if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) { 764 770 if (nvme_pci_cmd_use_sgl(&iod->cmd)) 765 771 nvme_free_sgls(req, iod->descriptors[0], 766 772 &iod->cmd.common.dptr.sgl); ··· 1035 1043 if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter)) 1036 1044 return iter.status; 1037 1045 1038 - if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR) 1039 - iod->flags |= IOD_P2P_BUS_ADDR; 1040 - 1041 1046 if (use_sgl == SGL_FORCED || 1042 1047 (use_sgl == SGL_SUPPORTED && 1043 1048 (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold))) ··· 1057 1068 &iod->meta_dma_state, &iter)) 1058 1069 return iter.status; 1059 1070 1060 - if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR) 1061 - iod->flags |= IOD_META_P2P_BUS_ADDR; 1062 - else if (blk_rq_dma_map_coalesce(&iod->meta_dma_state)) 1071 + if (blk_rq_dma_map_coalesce(&iod->meta_dma_state)) 1063 1072 entries = 1; 1064 1073 1065 1074 /*
+1
include/linux/bio-integrity.h
··· 13 13 BIP_CHECK_GUARD = 1 << 5, /* guard check */ 14 14 BIP_CHECK_REFTAG = 1 << 6, /* reftag check */ 15 15 BIP_CHECK_APPTAG = 1 << 7, /* apptag check */ 16 + BIP_P2P_DMA = 1 << 8, /* using P2P address */ 16 17 }; 17 18 18 19 struct bio_integrity_payload {
+15
include/linux/blk-integrity.h
··· 27 27 28 28 #ifdef CONFIG_BLK_DEV_INTEGRITY 29 29 int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); 30 + 31 + static inline bool blk_rq_integrity_dma_unmap(struct request *req, 32 + struct device *dma_dev, struct dma_iova_state *state, 33 + size_t mapped_len) 34 + { 35 + return blk_dma_unmap(req, dma_dev, state, mapped_len, 36 + bio_integrity(req->bio)->bip_flags & BIP_P2P_DMA); 37 + } 38 + 30 39 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); 31 40 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, 32 41 ssize_t bytes); ··· 123 114 struct scatterlist *s) 124 115 { 125 116 return 0; 117 + } 118 + static inline bool blk_rq_integrity_dma_unmap(struct request *req, 119 + struct device *dma_dev, struct dma_iova_state *state, 120 + size_t mapped_len) 121 + { 122 + return false; 126 123 } 127 124 static inline int blk_rq_integrity_map_user(struct request *rq, 128 125 void __user *ubuf,
+9 -2
include/linux/blk-mq-dma.h
··· 43 43 } 44 44 45 45 /** 46 - * blk_rq_dma_unmap - try to DMA unmap a request 46 + * blk_dma_unmap - try to DMA unmap a request 47 47 * @req: request to unmap 48 48 * @dma_dev: device to unmap from 49 49 * @state: DMA IOVA state ··· 53 53 * Returns %false if the callers need to manually unmap every DMA segment 54 54 * mapped using @iter or %true if no work is left to be done. 55 55 */ 56 - static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev, 56 + static inline bool blk_dma_unmap(struct request *req, struct device *dma_dev, 57 57 struct dma_iova_state *state, size_t mapped_len, bool is_p2p) 58 58 { 59 59 if (is_p2p) ··· 66 66 } 67 67 68 68 return !dma_need_unmap(dma_dev); 69 + } 70 + 71 + static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev, 72 + struct dma_iova_state *state, size_t mapped_len) 73 + { 74 + return blk_dma_unmap(req, dma_dev, state, mapped_len, 75 + req->cmd_flags & REQ_P2PDMA); 69 76 } 70 77 71 78 #endif /* BLK_MQ_DMA_H */
+2
include/linux/blk_types.h
··· 384 384 __REQ_DRV, /* for driver use */ 385 385 __REQ_FS_PRIVATE, /* for file system (submitter) use */ 386 386 __REQ_ATOMIC, /* for atomic write operations */ 387 + __REQ_P2PDMA, /* contains P2P DMA pages */ 387 388 /* 388 389 * Command specific flags, keep last: 389 390 */ ··· 417 416 #define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV) 418 417 #define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE) 419 418 #define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC) 419 + #define REQ_P2PDMA (__force blk_opf_t)(1ULL << __REQ_P2PDMA) 420 420 421 421 #define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP) 422 422