Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * Framework for buffer objects that can be shared across devices/subsystems.
4 *
5 * Copyright(C) 2015 Intel Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#ifndef _DMA_BUF_UAPI_H_
21#define _DMA_BUF_UAPI_H_
22
23#include <linux/ioctl.h>
24#include <linux/types.h>
25
26/**
27 * struct dma_buf_sync - Synchronize with CPU access.
28 *
29 * When a DMA buffer is accessed from the CPU via mmap, it is not always
30 * possible to guarantee coherency between the CPU-visible map and underlying
31 * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
32 * any CPU access to give the kernel the chance to shuffle memory around if
33 * needed.
34 *
35 * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
36 * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the
37 * access is complete, the client should call DMA_BUF_IOCTL_SYNC with
38 * DMA_BUF_SYNC_END and the same read/write flags.
39 *
40 * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
41 * coherency. It does not prevent other processes or devices from
42 * accessing the memory at the same time. If synchronization with a GPU or
43 * other device driver is required, it is the client's responsibility to
44 * wait for buffer to be ready for reading or writing before calling this
45 * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that
46 * follow-up work is not submitted to GPU or other device driver until
 * after this ioctl has been called with DMA_BUF_SYNC_END.
48 *
49 * If the driver or API with which the client is interacting uses implicit
50 * synchronization, waiting for prior work to complete can be done via
51 * poll() on the DMA buffer file descriptor. If the driver or API requires
52 * explicit synchronization, the client may have to wait on a sync_file or
53 * other synchronization primitive outside the scope of the DMA buffer API.
54 */
struct dma_buf_sync {
	/**
	 * @flags: Set of access flags
	 *
	 * Built by OR-ing the DMA_BUF_SYNC_* defines below: one phase
	 * flag (START or END) plus at least one direction flag
	 * (READ, WRITE, or RW).
	 *
	 * DMA_BUF_SYNC_START:
	 *     Indicates the start of a map access session.
	 *
	 * DMA_BUF_SYNC_END:
	 *     Indicates the end of a map access session.
	 *
	 * DMA_BUF_SYNC_READ:
	 *     Indicates that the mapped DMA buffer will be read by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_WRITE:
	 *     Indicates that the mapped DMA buffer will be written by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_RW:
	 *     An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
	 */
	__u64 flags;
};
78
/* Direction flags: bit 0 = read, bit 1 = write (hence WRITE's value of 2). */
#define DMA_BUF_SYNC_READ (1 << 0)
#define DMA_BUF_SYNC_WRITE (2 << 0)
#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
/* Phase flags: bit 2 clear marks the start of an access, set marks the end. */
#define DMA_BUF_SYNC_START (0 << 2)
#define DMA_BUF_SYNC_END (1 << 2)
/* Union of every flag bit defined above (SYNC_START contributes no bits). */
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)

/* Buffer name length limit used by the DMA_BUF_SET_NAME* ioctls below. */
#define DMA_BUF_NAME_LEN 32
88
89/**
90 * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
91 *
92 * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
93 * current set of fences on a dma-buf file descriptor as a sync_file. CPU
94 * waits via poll() or other driver-specific mechanisms typically wait on
95 * whatever fences are on the dma-buf at the time the wait begins. This
96 * is similar except that it takes a snapshot of the current fences on the
97 * dma-buf for waiting later instead of waiting immediately. This is
98 * useful for modern graphics APIs such as Vulkan which assume an explicit
99 * synchronization model but still need to inter-operate with dma-buf.
100 *
101 * The intended usage pattern is the following:
102 *
103 * 1. Export a sync_file with flags corresponding to the expected GPU usage
104 * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
105 *
106 * 2. Submit rendering work which uses the dma-buf. The work should wait on
107 * the exported sync file before rendering and produce another sync_file
108 * when complete.
109 *
110 * 3. Import the rendering-complete sync_file into the dma-buf with flags
111 * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
112 *
113 * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
114 * the above is not a single atomic operation. If userspace wants to ensure
 * ordering via these fences, it is the responsibility of userspace to use
116 * locks or other mechanisms to ensure that no other context adds fences or
117 * submits work between steps 1 and 3 above.
118 */
struct dma_buf_export_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 * (Phase flags such as DMA_BUF_SYNC_END do not apply here.)
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * the returned sync file waits on any writers of the dma-buf to
	 * complete. Waiting on the returned sync file is equivalent to
	 * poll() with POLLIN.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
	 * any users of the dma-buf (read or write) to complete. Waiting
	 * on the returned sync file is equivalent to poll() with POLLOUT.
	 * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
	 * is equivalent to just DMA_BUF_SYNC_WRITE.
	 */
	__u32 flags;
	/** @fd: Returned sync file descriptor (out parameter, filled by the kernel) */
	__s32 fd;
};
140
141/**
142 * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
143 *
144 * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
145 * sync_file into a dma-buf for the purposes of implicit synchronization
146 * with other dma-buf consumers. This allows clients using explicitly
147 * synchronized APIs such as Vulkan to inter-op with dma-buf consumers
148 * which expect implicit synchronization such as OpenGL or most media
149 * drivers/video.
150 */
struct dma_buf_import_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 * (Phase flags such as DMA_BUF_SYNC_END do not apply here.)
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * this inserts the sync_file as a read-only fence. Any subsequent
	 * implicitly synchronized writes to this dma-buf will wait on this
	 * fence but reads will not.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
	 * write fence. All subsequent implicitly synchronized access to
	 * this dma-buf will wait on this fence.
	 */
	__u32 flags;
	/** @fd: Sync file descriptor (in parameter, supplied by userspace) */
	__s32 fd;
};
170
/* ioctl type ("magic") character shared by all dma-buf ioctls. */
#define DMA_BUF_BASE 'b'
/* Bracket CPU access to the buffer; argument is struct dma_buf_sync. */
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)

/* 32/64bitness of this uapi was botched in android, there's no difference
 * between them in actual uapi, they're just different numbers.
 * All three SET_NAME defines therefore share ioctl number 1 and differ only
 * in the size field encoded by _IOW; kernels must accept every variant.
 */
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
/* _IOWR: userspace passes flags in and receives the sync_file fd back. */
#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE	_IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE	_IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
182
183#endif