Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
4 */
5
#include <linux/cdx/cdx_bus.h>
#include <linux/slab.h>
#include <linux/vfio.h>

#include "private.h"
10
11static int vfio_cdx_init_dev(struct vfio_device *core_vdev)
12{
13 struct vfio_cdx_device *vdev =
14 container_of(core_vdev, struct vfio_cdx_device, vdev);
15
16 mutex_init(&vdev->cdx_irqs_lock);
17 return 0;
18}
19
20static void vfio_cdx_release_dev(struct vfio_device *core_vdev)
21{
22 struct vfio_cdx_device *vdev =
23 container_of(core_vdev, struct vfio_cdx_device, vdev);
24
25 mutex_destroy(&vdev->cdx_irqs_lock);
26}
27
28static int vfio_cdx_open_device(struct vfio_device *core_vdev)
29{
30 struct vfio_cdx_device *vdev =
31 container_of(core_vdev, struct vfio_cdx_device, vdev);
32 struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
33 int count = cdx_dev->res_count;
34 int i, ret;
35
36 vdev->regions = kzalloc_objs(struct vfio_cdx_region, count,
37 GFP_KERNEL_ACCOUNT);
38 if (!vdev->regions)
39 return -ENOMEM;
40
41 for (i = 0; i < count; i++) {
42 struct resource *res = &cdx_dev->res[i];
43
44 vdev->regions[i].addr = res->start;
45 vdev->regions[i].size = resource_size(res);
46 vdev->regions[i].type = res->flags;
47 /*
48 * Only regions addressed with PAGE granularity may be
49 * MMAP'ed securely.
50 */
51 if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
52 !(vdev->regions[i].size & ~PAGE_MASK))
53 vdev->regions[i].flags |=
54 VFIO_REGION_INFO_FLAG_MMAP;
55 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
56 if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
57 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
58 }
59 ret = cdx_dev_reset(core_vdev->dev);
60 if (ret) {
61 kfree(vdev->regions);
62 vdev->regions = NULL;
63 return ret;
64 }
65 ret = cdx_clear_master(cdx_dev);
66 if (ret)
67 vdev->flags &= ~BME_SUPPORT;
68 else
69 vdev->flags |= BME_SUPPORT;
70
71 return 0;
72}
73
/*
 * vfio core .close_device callback, mirror of vfio_cdx_open_device():
 * free the region table, reset the device, and tear down the MSIs
 * configured by user space.
 */
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	/* Best-effort reset: there is no way to report failure here. */
	cdx_dev_reset(core_vdev->dev);
	vfio_cdx_irqs_cleanup(vdev);
}
83
84static int vfio_cdx_bm_ctrl(struct vfio_device *core_vdev, u32 flags,
85 void __user *arg, size_t argsz)
86{
87 size_t minsz =
88 offsetofend(struct vfio_device_feature_bus_master, op);
89 struct vfio_cdx_device *vdev =
90 container_of(core_vdev, struct vfio_cdx_device, vdev);
91 struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
92 struct vfio_device_feature_bus_master ops;
93 int ret;
94
95 if (!(vdev->flags & BME_SUPPORT))
96 return -ENOTTY;
97
98 ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
99 sizeof(ops));
100 if (ret != 1)
101 return ret;
102
103 if (copy_from_user(&ops, arg, minsz))
104 return -EFAULT;
105
106 switch (ops.op) {
107 case VFIO_DEVICE_FEATURE_CLEAR_MASTER:
108 return cdx_clear_master(cdx_dev);
109 case VFIO_DEVICE_FEATURE_SET_MASTER:
110 return cdx_set_master(cdx_dev);
111 default:
112 return -EINVAL;
113 }
114}
115
116static int vfio_cdx_ioctl_feature(struct vfio_device *device, u32 flags,
117 void __user *arg, size_t argsz)
118{
119 switch (flags & VFIO_DEVICE_FEATURE_MASK) {
120 case VFIO_DEVICE_FEATURE_BUS_MASTER:
121 return vfio_cdx_bm_ctrl(device, flags, arg, argsz);
122 default:
123 return -ENOTTY;
124 }
125}
126
127static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
128 struct vfio_device_info __user *arg)
129{
130 unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
131 struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
132 struct vfio_device_info info;
133
134 if (copy_from_user(&info, arg, minsz))
135 return -EFAULT;
136
137 if (info.argsz < minsz)
138 return -EINVAL;
139
140 info.flags = VFIO_DEVICE_FLAGS_CDX;
141 info.flags |= VFIO_DEVICE_FLAGS_RESET;
142
143 info.num_regions = cdx_dev->res_count;
144 info.num_irqs = cdx_dev->num_msi ? 1 : 0;
145
146 return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
147}
148
/*
 * vfio .get_region_info_caps callback: fill in offset/size/flags for
 * the region selected by info->index from the table built at open time.
 * No capability chains are emitted, so @caps is accepted but unused.
 */
static int vfio_cdx_ioctl_get_region_info(struct vfio_device *core_vdev,
					  struct vfio_region_info *info,
					  struct vfio_info_cap *caps)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);

	if (info->index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info->offset = vfio_cdx_index_to_offset(info->index);
	info->size = vdev->regions[info->index].size;
	info->flags = vdev->regions[info->index].flags;
	return 0;
}
166
167static int vfio_cdx_ioctl_get_irq_info(struct vfio_cdx_device *vdev,
168 struct vfio_irq_info __user *arg)
169{
170 unsigned long minsz = offsetofend(struct vfio_irq_info, count);
171 struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
172 struct vfio_irq_info info;
173
174 if (copy_from_user(&info, arg, minsz))
175 return -EFAULT;
176
177 if (info.argsz < minsz)
178 return -EINVAL;
179
180 if (info.index >= 1)
181 return -EINVAL;
182
183 if (!cdx_dev->num_msi)
184 return -EINVAL;
185
186 info.flags = VFIO_IRQ_INFO_EVENTFD | VFIO_IRQ_INFO_NORESIZE;
187 info.count = cdx_dev->num_msi;
188
189 return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
190}
191
/*
 * VFIO_DEVICE_SET_IRQS handler: validate the user-supplied header and
 * forward the request (plus any per-vector payload) to the IRQ layer.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_cdx_ioctl_set_irqs(struct vfio_cdx_device *vdev,
				   struct vfio_irq_set __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret = 0;

	/* Copy only the fixed-size header; data[] is variable length. */
	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	/*
	 * Check flags/index/start/count against the device's MSI count
	 * (a single IRQ index). On success, data_size holds the size of
	 * the payload to fetch, or 0 if none is expected.
	 */
	ret = vfio_set_irqs_validate_and_prepare(&hdr, cdx_dev->num_msi,
						 1, &data_size);
	if (ret)
		return ret;

	if (data_size) {
		/*
		 * arg->data is a flexible array member, so this forms the
		 * user address of the payload without dereferencing user
		 * memory in kernel context.
		 */
		data = memdup_user(arg->data, data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	ret = vfio_cdx_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
				      hdr.start, hdr.count, data);
	kfree(data);

	return ret;
}
222
223static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
224 unsigned int cmd, unsigned long arg)
225{
226 struct vfio_cdx_device *vdev =
227 container_of(core_vdev, struct vfio_cdx_device, vdev);
228 void __user *uarg = (void __user *)arg;
229
230 switch (cmd) {
231 case VFIO_DEVICE_GET_INFO:
232 return vfio_cdx_ioctl_get_info(vdev, uarg);
233 case VFIO_DEVICE_GET_IRQ_INFO:
234 return vfio_cdx_ioctl_get_irq_info(vdev, uarg);
235 case VFIO_DEVICE_SET_IRQS:
236 return vfio_cdx_ioctl_set_irqs(vdev, uarg);
237 case VFIO_DEVICE_RESET:
238 return cdx_dev_reset(core_vdev->dev);
239 default:
240 return -ENOTTY;
241 }
242}
243
/*
 * Map one device MMIO region into the caller's VMA.
 *
 * The low bits of vm_pgoff (below VFIO_CDX_OFFSET_SHIFT) select a page
 * offset within the region; the high bits encode the region index and
 * were already validated by the caller.
 */
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	/* Page offset into the region, extracted from the mmap cookie. */
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	/* Refuse mappings that would run past the end of the region. */
	if (base + size > region.size)
		return -EINVAL;

	/* Rewrite vm_pgoff to the physical PFN before remapping. */
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}
263
264static int vfio_cdx_mmap(struct vfio_device *core_vdev,
265 struct vm_area_struct *vma)
266{
267 struct vfio_cdx_device *vdev =
268 container_of(core_vdev, struct vfio_cdx_device, vdev);
269 struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
270 unsigned int index;
271
272 index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);
273
274 if (index >= cdx_dev->res_count)
275 return -EINVAL;
276
277 if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
278 return -EINVAL;
279
280 if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
281 (vma->vm_flags & VM_READ))
282 return -EPERM;
283
284 if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
285 (vma->vm_flags & VM_WRITE))
286 return -EPERM;
287
288 return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
289}
290
/*
 * vfio_device callbacks for CDX devices. The IOMMUFD ops use the
 * generic physical-device implementations from the vfio core.
 */
static const struct vfio_device_ops vfio_cdx_ops = {
	.name = "vfio-cdx",
	.init = vfio_cdx_init_dev,
	.release = vfio_cdx_release_dev,
	.open_device = vfio_cdx_open_device,
	.close_device = vfio_cdx_close_device,
	.ioctl = vfio_cdx_ioctl,
	.get_region_info_caps = vfio_cdx_ioctl_get_region_info,
	.device_feature = vfio_cdx_ioctl_feature,
	.mmap = vfio_cdx_mmap,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
};
305
306static int vfio_cdx_probe(struct cdx_device *cdx_dev)
307{
308 struct vfio_cdx_device *vdev;
309 struct device *dev = &cdx_dev->dev;
310 int ret;
311
312 vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
313 &vfio_cdx_ops);
314 if (IS_ERR(vdev))
315 return PTR_ERR(vdev);
316
317 ret = vfio_register_group_dev(&vdev->vdev);
318 if (ret)
319 goto out_uninit;
320
321 dev_set_drvdata(dev, vdev);
322 return 0;
323
324out_uninit:
325 vfio_put_device(&vdev->vdev);
326 return ret;
327}
328
329static int vfio_cdx_remove(struct cdx_device *cdx_dev)
330{
331 struct device *dev = &cdx_dev->dev;
332 struct vfio_cdx_device *vdev = dev_get_drvdata(dev);
333
334 vfio_unregister_group_dev(&vdev->vdev);
335 vfio_put_device(&vdev->vdev);
336
337 return 0;
338}
339
/*
 * Match table: binds to any CDX vendor/device, but only when user
 * space explicitly requests this driver via driver_override.
 */
static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);
347
/* CDX bus driver; DMA is managed by the vfio/iommufd framework. */
static struct cdx_driver vfio_cdx_driver = {
	.probe = vfio_cdx_probe,
	.remove = vfio_cdx_remove,
	.match_id_table = vfio_cdx_table,
	.driver = {
		.name = "vfio-cdx",
	},
	.driver_managed_dma = true,
};
357
/* Register/unregister with the CDX bus at module load/unload. */
module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
MODULE_IMPORT_NS("CDX_BUS");