// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2025, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <linux/dma-buf.h>
#include <linux/overflow.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "amdxdna_pci_drv.h"
#include "amdxdna_ubuf.h"

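/*
 * Private data for a dma-buf exported over user memory: the long-term
 * pinned pages backing the buffer, their count, and the mm_struct the
 * pages were pinned from (kept so the pinned_vm accounting can be
 * undone at release time).
 */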
struct amdxdna_ubuf_priv {
	struct page **pages;
	u64 nr_pages;
	struct mm_struct *mm;
};

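/*
 * Build a scatter/gather table over the pinned pages and DMA-map it
 * for the attaching device.
 */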
static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
					 enum dma_data_direction direction)
{
	struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
					ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto err_free_sg;

	ret = dma_map_sgtable(attach->dev, sg, direction, 0);
	if (ret)
		goto err_free_table;

	return sg;

err_free_table:
	sg_free_table(sg);
err_free_sg:
	kfree(sg);
	return ERR_PTR(ret);
}

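/* Undo amdxdna_ubuf_map(): DMA-unmap, then free the sg_table. */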
static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
			       struct sg_table *sg,
			       enum dma_data_direction direction)
{
	dma_unmap_sgtable(attach->dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

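/*
 * Runs when the last reference to the dma-buf is dropped: unpin the
 * user pages, return them to the owning mm's pinned_vm accounting and
 * drop the mm reference taken at export time.
 */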
static void amdxdna_ubuf_release(struct dma_buf *dbuf)
{
	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;

	unpin_user_pages(ubuf->pages, ubuf->nr_pages);
	kvfree(ubuf->pages);
	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
	mmdrop(ubuf->mm);
	kfree(ubuf);
}

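/*
 * CPU fault handler for userspace mappings: resolve the faulting offset
 * to the corresponding pinned page and insert its PFN.
 */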
static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct amdxdna_ubuf_priv *ubuf;
	unsigned long pfn;
	pgoff_t pgoff;

	ubuf = vma->vm_private_data;
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	/* Defensive bound: never index past the pinned page array */
	if (pgoff >= ubuf->nr_pages)
		return VM_FAULT_SIGBUS;

	pfn = page_to_pfn(ubuf->pages[pgoff]);
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
	.fault = amdxdna_ubuf_vm_fault,
};

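/*
 * Userspace mmap of the exported buffer; PFNs are inserted lazily by
 * amdxdna_ubuf_vm_fault().
 */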
static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
{
	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;

	vma->vm_ops = &amdxdna_ubuf_vm_ops;
	vma->vm_private_data = ubuf;
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	return 0;
}

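/* Map the pinned pages into one contiguous kernel virtual range. */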
static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
	void *kva;

	kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!kva)
		return -EINVAL;

	iosys_map_set_vaddr(map, kva);
	return 0;
}

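/* Tear down a kernel mapping created by amdxdna_ubuf_vmap(). */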
static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	vunmap(map->vaddr);
}

static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
	.map_dma_buf = amdxdna_ubuf_map,
	.unmap_dma_buf = amdxdna_ubuf_unmap,
	.release = amdxdna_ubuf_release,
	.mmap = amdxdna_ubuf_mmap,
	.vmap = amdxdna_ubuf_vmap,
	.vunmap = amdxdna_ubuf_vunmap,
};

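/**
 * amdxdna_get_ubuf() - Export user VA ranges as a dma-buf
 * @dev: DRM device
 * @num_entries: number of entries in the @va_entries array
 * @va_entries: user pointer to an array of struct amdxdna_drm_va_entry
 *
 * Validate that every range is page-aligned, long-term pin the backing
 * pages, charge them against RLIMIT_MEMLOCK, and export the pinned pages
 * as a dma-buf served by amdxdna_ubuf_dmabuf_ops.
 *
 * Return: the exported dma-buf on success, an ERR_PTR() on failure.
 */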
struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
				 u32 num_entries, void __user *va_entries)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	unsigned long lock_limit, new_pinned;
	struct amdxdna_drm_va_entry *va_ent;
	struct amdxdna_ubuf_priv *ubuf;
	u32 npages, start = 0;
	struct dma_buf *dbuf;
	int i, ret;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return ERR_PTR(-ENOMEM);

	ubuf->mm = current->mm;
	mmgrab(ubuf->mm);

	va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
	if (!va_ent) {
		ret = -ENOMEM;
		goto free_ubuf;
	}

	if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
		XDNA_DBG(xdna, "Access va entries failed");
		ret = -EINVAL;
		goto free_ent;
	}

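	/*
	 * Validate each range: addresses and lengths must be page-aligned,
	 * and the running total must not overflow the export size.
	 */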
	for (i = 0, exp_info.size = 0; i < num_entries; i++) {
		if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
		    !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
			XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
				 va_ent[i].vaddr, va_ent[i].len);
			ret = -EINVAL;
			goto free_ent;
		}

		if (check_add_overflow(exp_info.size, va_ent[i].len, &exp_info.size)) {
			ret = -EINVAL;
			goto free_ent;
		}
	}

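	/* Charge the pinned pages against the caller's RLIMIT_MEMLOCK. */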
	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		XDNA_DBG(xdna, "New pin %lu, limit %lu, cap %d",
			 new_pinned, lock_limit, capable(CAP_IPC_LOCK));
		ret = -ENOMEM;
		goto sub_pin_cnt;
	}

	ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto sub_pin_cnt;
	}

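	/* Long-term pin every range; 'start' tracks pages pinned so far. */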
	for (i = 0; i < num_entries; i++) {
		npages = va_ent[i].len >> PAGE_SHIFT;

		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
					  FOLL_WRITE | FOLL_LONGTERM,
					  &ubuf->pages[start]);
		if (ret < 0 || ret != npages) {
			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
			/* Account a partial pin so the unwind path unpins it */
			if (ret > 0)
				start += ret;
			ret = -ENOMEM;
			goto destroy_pages;
		}

		start += ret;
	}

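	/* All pages are pinned; wrap them in a dma-buf and hand it out. */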
	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR | O_CLOEXEC;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf)) {
		ret = PTR_ERR(dbuf);
		goto destroy_pages;
	}
	kvfree(va_ent);

	return dbuf;

destroy_pages:
	if (start)
		unpin_user_pages(ubuf->pages, start);
	kvfree(ubuf->pages);
sub_pin_cnt:
	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
free_ent:
	kvfree(va_ent);
free_ubuf:
	mmdrop(ubuf->mm);
	kfree(ubuf);
	return ERR_PTR(ret);
}