Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

nvme-pci: migrate to dma_map_phys instead of map_page

After the introduction of dma_map_phys(), there is no need to convert
a physical address to a struct page just to map it, so let's use
dma_map_phys() directly.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
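
The change itself is mechanical: dma_map_page() takes a struct page plus an offset within it, while dma_map_phys() takes the physical address directly and adds a trailing DMA attributes argument. Below is a minimal sketch contrasting the two call shapes; the helper names are invented for illustration, and it assumes the dma_map_phys() signature used in the hunks further down.

	/* Illustrative sketch only; not part of the patch. */
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static dma_addr_t example_map_old(struct device *dev, phys_addr_t paddr,
					  size_t len, enum dma_data_direction dir)
	{
		/* Old pattern: convert the physical address to a struct page
		 * and an offset before mapping. */
		return dma_map_page(dev, phys_to_page(paddr),
				    offset_in_page(paddr), len, dir);
	}

	static dma_addr_t example_map_new(struct device *dev, phys_addr_t paddr,
					  size_t len, enum dma_data_direction dir)
	{
		/* New pattern: map the physical address directly; the final
		 * argument carries DMA_ATTR_* flags (0 here, as in the patch). */
		return dma_map_phys(dev, paddr, len, dir, 0);
	}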

Authored by Leon Romanovsky, committed by Jens Axboe
61d43b17 8e1bf774

2 files changed, +15 -14
block/blk-mq-dma.c  +2 -2
···
 static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter, struct phys_vec *vec)
 {
-	iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
-			offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));
+	iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
+			rq_dma_dir(req), 0);
 	if (dma_mapping_error(dma_dev, iter->addr)) {
 		iter->status = BLK_STS_RESOURCE;
 		return false;
drivers/nvme/host/pci.c  +13 -12
···
 	}
 }

-static void nvme_free_prps(struct request *req)
+static void nvme_free_prps(struct request *req, unsigned int attrs)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	unsigned int i;

 	for (i = 0; i < iod->nr_dma_vecs; i++)
-		dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr,
-			iod->dma_vecs[i].len, rq_dma_dir(req));
+		dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
+			iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
 	mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
 }

 static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
-		struct nvme_sgl_desc *sg_list)
+		struct nvme_sgl_desc *sg_list, unsigned int attrs)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	enum dma_data_direction dir = rq_dma_dir(req);
···
 	unsigned int i;

 	if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
-		dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+		dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
+			attrs);
 		return;
 	}

 	for (i = 0; i < len / sizeof(*sg_list); i++)
-		dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
-			le32_to_cpu(sg_list[i].length), dir);
+		dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
+			le32_to_cpu(sg_list[i].length), dir, attrs);
 }

 static void nvme_unmap_metadata(struct request *req)
···
 	if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
 			iod->meta_total_len)) {
 		if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
-			nvme_free_sgls(req, sge, &sge[1]);
+			nvme_free_sgls(req, sge, &sge[1], 0);
 		else
-			dma_unmap_page(dma_dev, iod->meta_dma,
-				iod->meta_total_len, dir);
+			dma_unmap_phys(dma_dev, iod->meta_dma,
+				iod->meta_total_len, dir, 0);
 	}

 	if (iod->meta_descriptor)
···
 	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
 		if (nvme_pci_cmd_use_sgl(&iod->cmd))
 			nvme_free_sgls(req, iod->descriptors[0],
-				&iod->cmd.common.dptr.sgl);
+				&iod->cmd.common.dptr.sgl, 0);
 		else
-			nvme_free_prps(req);
+			nvme_free_prps(req, 0);
 	}

 	if (iod->nr_descriptors)
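
Note how the attrs parameter is threaded through nvme_free_prps() and nvme_free_sgls() so the unmap path can pass the same DMA attributes as the mapping side; every caller in this patch passes 0. A hypothetical end-to-end use of the map/unmap pair (function name invented, assuming the signatures used in the hunks above):

	/* Illustrative sketch only; not part of the patch. */
	static int example_dma_roundtrip(struct device *dev, phys_addr_t paddr,
					 size_t len, enum dma_data_direction dir)
	{
		dma_addr_t addr = dma_map_phys(dev, paddr, len, dir, 0);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;

		/* ... program 'addr' into the device and wait for the I/O ... */

		/* Unmap with the same length, direction and attrs used to map. */
		dma_unmap_phys(dev, addr, len, dir, 0);
		return 0;
	}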