Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

block, nvme: remove unused dma_iova_state function parameter

DMA IOVA state is not used inside blk_rq_dma_map_iter_next, so get
rid of the argument.

Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Nitesh Shetty and committed by
Jens Axboe
91e1c1bc 65955a09

+4 -6
+1 -2
block/blk-mq-dma.c
··· 238 238 * blk_rq_dma_map_iter_next - map the next DMA segment for a request 239 239 * @req: request to map 240 240 * @dma_dev: device to map to 241 - * @state: DMA IOVA state 242 241 * @iter: block layer DMA iterator 243 242 * 244 243 * Iterate to the next mapping after a previous call to ··· 252 253 * returned in @iter.status. 253 254 */ 254 255 bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev, 255 - struct dma_iova_state *state, struct blk_dma_iter *iter) 256 + struct blk_dma_iter *iter) 256 257 { 257 258 struct phys_vec vec; 258 259
+2 -3
drivers/nvme/host/pci.c
··· 823 823 824 824 if (iter->len) 825 825 return true; 826 - if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter)) 826 + if (!blk_rq_dma_map_iter_next(req, dma_dev, iter)) 827 827 return false; 828 828 if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) { 829 829 iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr; ··· 1010 1010 } 1011 1011 nvme_pci_sgl_set_data(&sg_list[mapped++], iter); 1012 1012 iod->total_len += iter->len; 1013 - } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state, 1014 - iter)); 1013 + } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter)); 1015 1014 1016 1015 nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped); 1017 1016 if (unlikely(iter->status))
+1 -1
include/linux/blk-mq-dma.h
··· 28 28 bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev, 29 29 struct dma_iova_state *state, struct blk_dma_iter *iter); 30 30 bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev, 31 - struct dma_iova_state *state, struct blk_dma_iter *iter); 31 + struct blk_dma_iter *iter); 32 32 33 33 /** 34 34 * blk_rq_dma_map_coalesce - were all segments coalesced?