// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem DMA helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: dma helpers
 *
 * The DRM GEM/DMA helpers are a means to provide buffer objects that are
 * presented to the device as a contiguous chunk of memory. This is useful
 * for devices that do not support scatter-gather DMA (either directly or
 * by using an intimately attached IOMMU).
 *
 * For devices that access the memory bus through an (external) IOMMU, the
 * buffer objects are allocated using a traditional page-based allocator
 * and may be scattered through physical memory. However, they are
 * contiguous in the IOVA space, so they appear contiguous to the devices
 * using them.
 *
 * For other devices, the helpers rely on CMA to provide buffer objects
 * that are physically contiguous in memory.
 *
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
 * drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
 */

static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
	.free = drm_gem_dma_object_free,
	.print_info = drm_gem_dma_object_print_info,
	.get_sg_table = drm_gem_dma_object_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = drm_gem_dma_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};
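
/*
 * Example: a minimal, hypothetical sketch (the driver name "foo" is made up).
 * Most drivers never reference drm_gem_dma_default_funcs directly; they
 * declare their &struct drm_driver with the DRM_GEM_DMA_DRIVER_OPS macro,
 * which wires up the dumb-buffer and PRIME import callbacks, and
 * __drm_gem_dma_create() installs the default object functions above on each
 * new object.
 *
 *     static const struct drm_driver foo_driver = {
 *             .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *             DRM_GEM_DMA_DRIVER_OPS,
 *             .name = "foo",
 *             .desc = "hypothetical GEM DMA example",
 *     };
 */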

/**
 * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM DMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object) {
		gem_obj = drm->driver->gem_create_object(drm, size);
		if (IS_ERR(gem_obj))
			return ERR_CAST(gem_obj);
		dma_obj = to_drm_gem_dma_obj(gem_obj);
	} else {
		dma_obj = kzalloc_obj(*dma_obj);
		if (!dma_obj)
			return ERR_PTR(-ENOMEM);
		gem_obj = &dma_obj->base;
	}

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_dma_default_funcs;

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		dma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return dma_obj;

error:
	kfree(dma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_dma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a DMA GEM object and allocates memory as backing store.
 * The allocated memory will occupy a contiguous chunk of bus address space.
 *
 * For devices that are directly connected to the memory bus, the allocated
 * memory will be physically contiguous. For devices that access memory through
 * an IOMMU, the allocated memory is not expected to be physically contiguous,
 * because having contiguous IOVAs is sufficient to meet a device's DMA
 * requirements.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_dma_object *dma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	dma_obj = __drm_gem_dma_create(drm, size, false);
	if (IS_ERR(dma_obj))
		return dma_obj;

	if (dma_obj->map_noncoherent) {
		dma_obj->vaddr = dma_alloc_noncoherent(drm_dev_dma_dev(drm),
						       size,
						       &dma_obj->dma_addr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		dma_obj->vaddr = dma_alloc_wc(drm_dev_dma_dev(drm), size,
					      &dma_obj->dma_addr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!dma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return dma_obj;

error:
	drm_gem_object_put(&dma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_create);

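/*
 * Example: a minimal, hypothetical sketch of allocating a buffer from driver
 * code. Only drm_gem_dma_create() and drm_gem_object_put() are real; the
 * size computation and surrounding logic are made up for illustration.
 *
 *     struct drm_gem_dma_object *dma_obj;
 *
 *     dma_obj = drm_gem_dma_create(drm, width * height * 4);
 *     if (IS_ERR(dma_obj))
 *             return PTR_ERR(dma_obj);
 *
 *     // Program dma_obj->dma_addr into the device; use dma_obj->vaddr for
 *     // CPU access. Drop the reference when the driver is done with it:
 *     drm_gem_object_put(&dma_obj->base);
 */
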
/**
 * drm_gem_dma_create_with_handle - allocate an object with the given size and
 * return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a DMA GEM object, allocating a chunk of memory as
 * backing store. The GEM object is then added to the list of objects
 * associated with the given file and a handle to it is returned.
 *
 * The allocated memory will occupy a contiguous chunk of bus address space.
 * See drm_gem_dma_create() for more details.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
drm_gem_dma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	dma_obj = drm_gem_dma_create(drm, size);
	if (IS_ERR(dma_obj))
		return dma_obj;

	gem_obj = &dma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle holds the ID that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return dma_obj;
}

/**
 * drm_gem_dma_free - free resources associated with a DMA GEM object
 * @dma_obj: DMA GEM object to free
 *
 * This function frees the backing memory of the DMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
	struct drm_gem_object *gem_obj = &dma_obj->base;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);

	if (drm_gem_is_imported(gem_obj)) {
		if (dma_obj->vaddr)
			dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
	} else if (dma_obj->vaddr) {
		if (dma_obj->map_noncoherent)
			dma_free_noncoherent(drm_dev_dma_dev(gem_obj->dev),
					     dma_obj->base.size,
					     dma_obj->vaddr, dma_obj->dma_addr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(drm_dev_dma_dev(gem_obj->dev),
				    dma_obj->base.size, dma_obj->vaddr,
				    dma_obj->dma_addr);
	}

	drm_gem_object_release(gem_obj);

	kfree(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_free);

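/*
 * Example: a hedged sketch of a driver that embeds the DMA GEM object at the
 * start of a larger, driver-private object (struct foo_bo and
 * foo_bo_teardown() are hypothetical). Because drm_gem_dma_free() ends in
 * kfree() of the object, the embedded base must be the first member.
 *
 *     struct foo_bo {
 *             struct drm_gem_dma_object base;  // must be first
 *             struct list_head node;
 *     };
 *
 *     static void foo_free_object(struct drm_gem_object *obj)
 *     {
 *             struct foo_bo *bo = container_of(obj, struct foo_bo, base.base);
 *
 *             foo_bo_teardown(bo);
 *             drm_gem_dma_free(&bo->base);
 *     }
 */
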
/**
 * drm_gem_dma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. It is an
 * internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. Drivers should not use it
 * directly as their &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_dma_object *dma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);

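/*
 * Example: a hedged sketch of wrapping the internal helper for hardware that
 * requires, say, a 64-byte-aligned pitch ("foo" and the alignment value are
 * hypothetical). The wrapper adjusts the fields set up by userspace and then
 * lets the helper do the rest.
 *
 *     static int foo_dumb_create(struct drm_file *file_priv,
 *                                struct drm_device *drm,
 *                                struct drm_mode_create_dumb *args)
 *     {
 *             args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *
 *             return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
 *     }
 */
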
/**
 * drm_gem_dma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_dma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_dma_object *dma_obj;
	int ret;

	ret = drm_mode_size_dumb(drm, args, 0, 0);
	if (ret)
		return ret;

	dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);

const struct vm_operations_struct drm_gem_dma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer. It is intended to be used as a direct handler for the
 * struct &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * The mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	dma_obj = to_drm_gem_dma_obj(obj);

	drm_gem_object_put(obj);

	return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
#endif

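/*
 * Example: drivers do not normally open-code the noMMU hookup; the
 * DEFINE_DRM_GEM_DMA_FOPS() macro declares a &struct file_operations that
 * uses drm_gem_dma_get_unmapped_area() on noMMU kernels (the name "foo_fops"
 * below is hypothetical):
 *
 *     DEFINE_DRM_GEM_DMA_FOPS(foo_fops);
 *
 *     static const struct drm_driver foo_driver = {
 *             ...
 *             .fops = &foo_fops,
 *     };
 */
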
/**
 * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
 * @dma_obj: DMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints dma_addr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
			    struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
	drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_dma_print_info);

/**
 * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
 * pages for a DMA GEM object
 * @dma_obj: DMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
	struct drm_gem_object *obj = &dma_obj->base;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc_obj(*sgt);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(drm_dev_dma_dev(obj->dev), sgt, dma_obj->vaddr,
			      dma_obj->dma_addr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);

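/*
 * Example: a short, hedged usage sketch. For DMA-contiguous buffers the
 * returned table typically carries a single entry.
 *
 *     struct sg_table *sgt = drm_gem_dma_get_sg_table(dma_obj);
 *
 *     if (IS_ERR(sgt))
 *             return PTR_ERR(sgt);
 */
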
/**
 * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
 * driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the DMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_dma_object *dma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a DMA GEM buffer. */
	dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(dma_obj))
		return ERR_CAST(dma_obj);

	dma_obj->dma_addr = sg_dma_address(sgt->sgl);
	dma_obj->sgt = sgt;

	drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
		      attach->dmabuf->size);

	return &dma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);

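/*
 * Example: drivers that stick with the default (non-vmap) import path get
 * this callback via DRM_GEM_DMA_DRIVER_OPS; setting it explicitly is
 * equivalent (a sketch, with "foo_driver" hypothetical):
 *
 *     static const struct drm_driver foo_driver = {
 *             ...
 *             .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table,
 *             ...
 *     };
 */
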
/**
 * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
 * address space
 * @dma_obj: DMA GEM object
 * @map: Returns the kernel virtual address of the DMA GEM object's backing
 * store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the DMA buffers are already mapped into the kernel virtual address
 * space this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
		     struct iosys_map *map)
{
	iosys_map_set_vaddr(map, dma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);

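/*
 * Example: a minimal, hedged sketch of clearing a buffer through the kernel
 * mapping. drm_gem_dma_vmap() cannot fail for helper-allocated buffers, but
 * checking the return value keeps the sketch honest.
 *
 *     struct iosys_map map;
 *     int ret;
 *
 *     ret = drm_gem_dma_vmap(dma_obj, &map);
 *     if (ret)
 *             return ret;
 *
 *     iosys_map_memset(&map, 0, 0, dma_obj->base.size);
 */
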
/**
 * drm_gem_dma_mmap - memory-map an exported DMA GEM object
 * @dma_obj: DMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &dma_obj->base;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_mod(vma, VM_DONTDUMP | VM_DONTEXPAND, VM_PFNMAP);

	if (dma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(drm_dev_dma_dev(dma_obj->base.dev),
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(dma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(drm_dev_dma_dev(dma_obj->base.dev), vma,
				  dma_obj->vaddr, dma_obj->dma_addr,
				  vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);

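/*
 * Example: userspace reaches this function through the fake mmap offset; a
 * typical (hedged) sequence against the DRM file descriptor looks like:
 *
 *     struct drm_mode_map_dumb map_arg = { .handle = handle };
 *
 *     ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map_arg);
 *     ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                drm_fd, map_arg.offset);
 *
 * drm_gem_mmap() then resolves the offset to the GEM object and calls the
 * object's &drm_gem_object_funcs.mmap callback, which lands here via
 * drm_gem_dma_object_mmap().
 */
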
/**
 * drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
 * scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap_unlocked() to get
 * the kernel virtual address. This ensures that a DMA GEM object always has
 * its virtual address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *obj;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
	if (ret) {
		drm_err(dev, "Failed to vmap PRIME buffer\n");
		return ERR_PTR(ret);
	}

	obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap_unlocked(attach->dmabuf, &map);
		return obj;
	}

	dma_obj = to_drm_gem_dma_obj(obj);
	dma_obj->vaddr = map.vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);

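/*
 * Example: hooking up the vmap'ing import path. The
 * DRM_GEM_DMA_DRIVER_OPS_VMAP macro selects this function instead of
 * drm_gem_dma_prime_import_sg_table() (a sketch, "foo_driver" hypothetical):
 *
 *     static const struct drm_driver foo_driver = {
 *             .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *             DRM_GEM_DMA_DRIVER_OPS_VMAP,
 *             .name = "foo",
 *     };
 */
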
MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL");