// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <kunit/visibility.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For the GEM callback helpers usable in struct &drm_gem_object_funcs, see the
 * likewise named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap()
 * wraps drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
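
/*
 * Illustrative sketch (not taken from a real driver): a driver that relies
 * entirely on these helpers lets drm_gem_shmem_create() install the default
 * object functions below and only wires up the driver-level callbacks. The
 * "foo_" names are hypothetical; DRM_GEM_SHMEM_DRIVER_OPS is expected to fill
 * in the dumb-buffer and PRIME import hooks declared in
 * <drm/drm_gem_shmem_helper.h>.
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		// remaining driver callbacks ...
 *	};
 */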

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
				size_t size, bool private)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		return ret;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return 0;

err_release:
	drm_gem_object_release(obj);
	return ret;
}

/**
 * drm_gem_shmem_init - Initialize an allocated object.
 * @dev: DRM device
 * @shmem: shmem GEM object to initialize
 * @size: Buffer size in bytes
 *
 * This function initializes an allocated shmem GEM object.
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
{
	return __drm_gem_shmem_init(dev, shmem, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
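
/*
 * Illustrative sketch: drivers that embed struct drm_gem_shmem_object in a
 * larger, driver-specific buffer object can allocate that object themselves
 * and then call drm_gem_shmem_init(). All "foo_" names are hypothetical and
 * only show the expected calling convention.
 *
 *	struct foo_bo {
 *		struct drm_gem_shmem_object shmem;
 *		u32 flags;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_shmem_init(dev, &bo->shmem, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 *
 * The matching teardown would call drm_gem_shmem_release() before freeing the
 * embedding structure.
 */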

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	ret = __drm_gem_shmem_init(dev, shmem, size, private);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return shmem;
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
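
/*
 * Illustrative sketch: allocating a helper-managed buffer from driver code
 * and dropping the reference when it is no longer needed. The snippet is
 * hypothetical and only shows the expected calling convention.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	// use shmem->base like any other GEM object ...
 *
 *	drm_gem_object_put(&shmem->base);
 */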

/**
 * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function cleans up the GEM object state, but does not free the memory used to store the
 * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
 */
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_release);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	drm_gem_shmem_release(shmem);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
		shmem->pages_mark_accessed_on_put = false;
		shmem->pages_mark_dirty_on_put = false;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
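
/*
 * Illustrative sketch: a driver that needs the backing pages to stay resident
 * while the hardware works on the buffer brackets the access with
 * drm_gem_shmem_pin() and drm_gem_shmem_unpin(). The snippet is hypothetical
 * and only shows the expected pairing.
 *
 *	int ret = drm_gem_shmem_pin(shmem);
 *
 *	if (ret)
 *		return ret;
 *
 *	// program the hardware with the buffer ...
 *
 *	drm_gem_shmem_unpin(shmem);
 */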

/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 * store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
			shmem->pages_mark_accessed_on_put = true;
			shmem->pages_mark_dirty_on_put = true;
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
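
/*
 * Illustrative sketch: CPU access through a temporary kernel mapping. The
 * caller must hold the object's reservation lock around the _locked variants.
 * iosys_map_memcpy_to() is used here as one possible accessor, and "data" and
 * "len" are hypothetical; the snippet is not lifted from a real driver.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, data, len);
 *		drm_gem_shmem_vunmap_locked(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */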

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much memory as possible back to the
	 * system, as we are called from OOM. To do this we must instruct the
	 * shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
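
/*
 * Illustrative sketch: a driver shrinker (or OOM handler) typically marks a
 * buffer as purgeable via the madvise helper and later purges it under memory
 * pressure, all under the reservation lock. This is a hypothetical flow, not
 * a complete shrinker implementation.
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *
 *	// madv > 0 means userspace no longer needs the contents
 *	drm_gem_shmem_madvise_locked(shmem, 1);
 *
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge_locked(shmem);
 *
 *	dma_resv_unlock(shmem->base.resv);
 */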

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, 0, 0);
	if (ret)
		return ret;

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
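
/*
 * Illustrative sketch: hooking the dumb-buffer helper into a driver without
 * using DRM_GEM_SHMEM_DRIVER_OPS. The structure below is hypothetical; it is
 * equivalent to what the combined macro already sets up for this hook.
 *
 *	static const struct drm_driver foo_driver = {
 *		// other callbacks ...
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */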

static void drm_gem_shmem_record_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */

	if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
		return;

	file_update_time(vma->vm_file);
	folio_mark_dirty(page_folio(shmem->pages[page_offset]));
}

static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
				 unsigned long pfn)
{
	if (!order) {
		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	} else if (order == PMD_ORDER) {
		unsigned long paddr = pfn << PAGE_SHIFT;
		bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

		if (aligned &&
		    folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
			vm_fault_t ret;

			pfn &= PMD_MASK >> PAGE_SHIFT;

			/* Unlike PTEs which are automatically upgraded to
			 * writeable entries, the PMD upgrades go through
			 * .huge_fault(). Make sure we pass the "write" info
			 * along in that case.
			 * This also means we have to record the write fault
			 * here, instead of in .pfn_mkwrite().
			 */
			ret = vmf_insert_pfn_pmd(vmf, pfn,
						 vmf->flags & FAULT_FLAG_WRITE);
			if (ret == VM_FAULT_NOPAGE && (vmf->flags & FAULT_FLAG_WRITE))
				drm_gem_shmem_record_mkwrite(vmf);

			return ret;
		}
#endif
	}
	return VM_FAULT_FALLBACK;
}

static vm_fault_t drm_gem_shmem_any_fault(struct vm_fault *vmf, unsigned int order)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	struct page **pages = shmem->pages;
	pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
	struct page *page;
	struct folio *folio;
	unsigned long pfn;

	if (order && order != PMD_ORDER)
		return VM_FAULT_FALLBACK;

	dma_resv_lock(obj->resv, NULL);

	if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
	    shmem->madv < 0)
		goto out;

	page = pages[page_offset];
	if (drm_WARN_ON_ONCE(dev, !page))
		goto out;
	folio = page_folio(page);

	pfn = page_to_pfn(page);

	ret = try_insert_pfn(vmf, order, pfn);
	if (ret == VM_FAULT_NOPAGE)
		folio_mark_accessed(folio);

out:
	dma_resv_unlock(obj->resv);

	return ret;
}

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	return drm_gem_shmem_any_fault(vmf, 0);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
{
	drm_gem_shmem_record_mkwrite(vmf);
	return 0;
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	.huge_fault = drm_gem_shmem_any_fault,
#endif
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
	.pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 * pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 * scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
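
/*
 * Illustrative sketch: getting at the backing storage for device DMA. The
 * scatterlist walk is generic; "foo" and "foo_hw_program_chunk" are
 * hypothetical stand-ins for whatever programs the hardware.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_hw_program_chunk(foo, sg_dma_address(sg), sg_dma_len(sg));
 */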

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 * another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

/**
 * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
 * @dev: Device to import into
 * @dma_buf: dma-buf object to import
 *
 * Drivers that use the shmem helpers but also want to import dmabuf without
 * mapping its sg_table can use this as their &drm_driver.gem_prime_import
 * implementation.
 */
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	size_t size;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing dmabuf exported from our own gem increases
		 * refcount on gem itself instead of f_count of dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	size = PAGE_ALIGN(attach->dmabuf->size);

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto fail_detach;
	}

	drm_dbg_prime(dev, "size = %zu\n", size);

	shmem->base.import_attach = attach;
	shmem->base.resv = dma_buf->resv;

	return &shmem->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
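
/*
 * Illustrative sketch: the two import helpers above plug into different
 * &drm_driver hooks, as their kernel-doc notes. A hypothetical driver would
 * pick one depending on whether it needs the imported sg_table mapped.
 *
 *	static const struct drm_driver foo_driver = {
 *		// other callbacks ...
 *		// default path: map the sg_table on import
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *		// or, alternatively, defer mapping entirely:
 *		// .gem_prime_import = drm_gem_shmem_prime_import_no_map,
 *	};
 */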

/*
 * KUnit helpers
 */

#if IS_ENABLED(CONFIG_KUNIT)
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vmap);

void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_lock(obj->resv, NULL);
	drm_gem_shmem_vunmap_locked(shmem, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vunmap);

int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_madvise_locked(shmem, madv);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_madvise);

int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	drm_gem_shmem_purge_locked(shmem);
	dma_resv_unlock(obj->resv);

	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_purge);
#endif

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL");