Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

vduse: switch to use virtio map API instead of DMA API

Lacking support for device-specific mapping in virtio, VDUSE must
trick the DMA API in order to make the virtio-vdpa transport work.
This is done by advertising the vDPA device as a DMA device with
VDUSE-specific dma_ops even though it doesn't do DMA at all.

This patch fixes this: thanks to the new mapping operations supported
by virtio and vDPA, VDUSE can simply advertise its specific mapping
operations to virtio via virtio-vdpa. The DMA API is then no longer
needed for VDUSE, and the iova domain can be used as the mapping
token instead.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20250924070045.10361-3-jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Eugenio Pérez <eperezma@redhat.com>

authored by

Jason Wang and committed by
Michael S. Tsirkin
1c14b0e4 0d16cc43

+46 -48
+1 -7
drivers/vdpa/Kconfig
··· 34 34 35 35 config VDPA_USER 36 36 tristate "VDUSE (vDPA Device in Userspace) support" 37 - depends on EVENTFD && MMU && HAS_DMA 38 - # 39 - # This driver incorrectly tries to override the dma_ops. It should 40 - # never have done that, but for now keep it working on architectures 41 - # that use dma ops 42 - # 43 - depends on ARCH_HAS_DMA_OPS 37 + depends on EVENTFD && MMU 44 38 select VHOST_IOTLB 45 39 select IOMMU_IOVA 46 40 help
+1 -1
drivers/vdpa/vdpa_user/iova_domain.c
··· 447 447 448 448 void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain, 449 449 size_t size, dma_addr_t *dma_addr, 450 - gfp_t flag, unsigned long attrs) 450 + gfp_t flag) 451 451 { 452 452 struct iova_domain *iovad = &domain->consistent_iovad; 453 453 unsigned long limit = domain->iova_limit;
+1 -1
drivers/vdpa/vdpa_user/iova_domain.h
··· 64 64 65 65 void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain, 66 66 size_t size, dma_addr_t *dma_addr, 67 - gfp_t flag, unsigned long attrs); 67 + gfp_t flag); 68 68 69 69 void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size, 70 70 void *vaddr, dma_addr_t dma_addr,
+39 -39
drivers/vdpa/vdpa_user/vduse_dev.c
··· 814 814 .free = vduse_vdpa_free, 815 815 }; 816 816 817 - static void vduse_dev_sync_single_for_device(struct device *dev, 817 + static void vduse_dev_sync_single_for_device(union virtio_map token, 818 818 dma_addr_t dma_addr, size_t size, 819 819 enum dma_data_direction dir) 820 820 { 821 - struct vduse_dev *vdev = dev_to_vduse(dev); 822 - struct vduse_iova_domain *domain = vdev->domain; 821 + struct vduse_iova_domain *domain = token.iova_domain; 823 822 824 823 vduse_domain_sync_single_for_device(domain, dma_addr, size, dir); 825 824 } 826 825 827 - static void vduse_dev_sync_single_for_cpu(struct device *dev, 826 + static void vduse_dev_sync_single_for_cpu(union virtio_map token, 828 827 dma_addr_t dma_addr, size_t size, 829 828 enum dma_data_direction dir) 830 829 { 831 - struct vduse_dev *vdev = dev_to_vduse(dev); 832 - struct vduse_iova_domain *domain = vdev->domain; 830 + struct vduse_iova_domain *domain = token.iova_domain; 833 831 834 832 vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir); 835 833 } 836 834 837 - static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page, 835 + static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page, 838 836 unsigned long offset, size_t size, 839 837 enum dma_data_direction dir, 840 838 unsigned long attrs) 841 839 { 842 - struct vduse_dev *vdev = dev_to_vduse(dev); 843 - struct vduse_iova_domain *domain = vdev->domain; 840 + struct vduse_iova_domain *domain = token.iova_domain; 844 841 845 842 return vduse_domain_map_page(domain, page, offset, size, dir, attrs); 846 843 } 847 844 848 - static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr, 849 - size_t size, enum dma_data_direction dir, 850 - unsigned long attrs) 845 + static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr, 846 + size_t size, enum dma_data_direction dir, 847 + unsigned long attrs) 851 848 { 852 - struct vduse_dev *vdev = dev_to_vduse(dev); 853 - struct 
vduse_iova_domain *domain = vdev->domain; 849 + struct vduse_iova_domain *domain = token.iova_domain; 854 850 855 851 return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs); 856 852 } 857 853 858 - static void *vduse_dev_alloc_coherent(struct device *dev, size_t size, 859 - dma_addr_t *dma_addr, gfp_t flag, 860 - unsigned long attrs) 854 + static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size, 855 + dma_addr_t *dma_addr, gfp_t flag) 861 856 { 862 - struct vduse_dev *vdev = dev_to_vduse(dev); 863 - struct vduse_iova_domain *domain = vdev->domain; 857 + struct vduse_iova_domain *domain = token.iova_domain; 864 858 unsigned long iova; 865 859 void *addr; 866 860 867 861 *dma_addr = DMA_MAPPING_ERROR; 868 862 addr = vduse_domain_alloc_coherent(domain, size, 869 - (dma_addr_t *)&iova, flag, attrs); 863 + (dma_addr_t *)&iova, flag); 870 864 if (!addr) 871 865 return NULL; 872 866 ··· 869 875 return addr; 870 876 } 871 877 872 - static void vduse_dev_free_coherent(struct device *dev, size_t size, 873 - void *vaddr, dma_addr_t dma_addr, 874 - unsigned long attrs) 878 + static void vduse_dev_free_coherent(union virtio_map token, size_t size, 879 + void *vaddr, dma_addr_t dma_addr, 880 + unsigned long attrs) 875 881 { 876 - struct vduse_dev *vdev = dev_to_vduse(dev); 877 - struct vduse_iova_domain *domain = vdev->domain; 882 + struct vduse_iova_domain *domain = token.iova_domain; 878 883 879 884 vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs); 880 885 } 881 886 882 - static size_t vduse_dev_max_mapping_size(struct device *dev) 887 + static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr) 883 888 { 884 - struct vduse_dev *vdev = dev_to_vduse(dev); 885 - struct vduse_iova_domain *domain = vdev->domain; 889 + struct vduse_iova_domain *domain = token.iova_domain; 890 + 891 + return dma_addr < domain->bounce_size; 892 + } 893 + 894 + static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t 
dma_addr) 895 + { 896 + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) 897 + return -ENOMEM; 898 + return 0; 899 + } 900 + 901 + static size_t vduse_dev_max_mapping_size(union virtio_map token) 902 + { 903 + struct vduse_iova_domain *domain = token.iova_domain; 886 904 887 905 return domain->bounce_size; 888 906 } 889 907 890 - static const struct dma_map_ops vduse_dev_dma_ops = { 908 + static const struct virtio_map_ops vduse_map_ops = { 891 909 .sync_single_for_device = vduse_dev_sync_single_for_device, 892 910 .sync_single_for_cpu = vduse_dev_sync_single_for_cpu, 893 911 .map_page = vduse_dev_map_page, 894 912 .unmap_page = vduse_dev_unmap_page, 895 913 .alloc = vduse_dev_alloc_coherent, 896 914 .free = vduse_dev_free_coherent, 915 + .need_sync = vduse_dev_need_sync, 916 + .mapping_error = vduse_dev_mapping_error, 897 917 .max_mapping_size = vduse_dev_max_mapping_size, 898 918 }; 899 919 ··· 2011 2003 static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name) 2012 2004 { 2013 2005 struct vduse_vdpa *vdev; 2014 - int ret; 2015 2006 2016 2007 if (dev->vdev) 2017 2008 return -EEXIST; 2018 2009 2019 2010 vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev, 2020 - &vduse_vdpa_config_ops, NULL, 2011 + &vduse_vdpa_config_ops, &vduse_map_ops, 2021 2012 1, 1, name, true); 2022 2013 if (IS_ERR(vdev)) 2023 2014 return PTR_ERR(vdev); 2024 2015 2025 2016 dev->vdev = vdev; 2026 2017 vdev->dev = dev; 2027 - vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask; 2028 - ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64)); 2029 - if (ret) { 2030 - put_device(&vdev->vdpa.dev); 2031 - return ret; 2032 - } 2033 - set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops); 2034 - vdev->vdpa.vmap.dma_dev = &vdev->vdpa.dev; 2035 2018 vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev; 2036 2019 2037 2020 return 0; ··· 2055 2056 return -ENOMEM; 2056 2057 } 2057 2058 2059 + dev->vdev->vdpa.vmap.iova_domain = dev->domain; 2058 2060 ret = 
_vdpa_register_device(&dev->vdev->vdpa, dev->vq_num); 2059 2061 if (ret) { 2060 2062 put_device(&dev->vdev->vdpa.dev);
+4
include/linux/virtio.h
··· 41 41 void *priv; 42 42 }; 43 43 44 + struct vduse_iova_domain; 45 + 44 46 union virtio_map { 45 47 /* Device that performs DMA */ 46 48 struct device *dma_dev; 49 + /* VDUSE specific mapping data */ 50 + struct vduse_iova_domain *iova_domain; 47 51 }; 48 52 49 53 int virtqueue_add_outbuf(struct virtqueue *vq,