/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;
struct iommu_ops;

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages_op)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_phys)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
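
/*
 * Illustrative sketch (not part of the kernel): a minimal dma_map_ops
 * implementation for a hypothetical bus where device addresses equal CPU
 * physical addresses. All "example_*" names below are made up; a real
 * implementation would also provide sync and allocation callbacks as
 * needed.
 */
#if 0	/* example only, never compiled */
static dma_addr_t example_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return (dma_addr_t)phys;	/* identity mapping: bus == physical */
}

static void example_unmap_phys(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* nothing to undo for an identity mapping */
}

static const struct dma_map_ops example_dma_ops = {
	.map_phys	= example_map_phys,
	.unmap_phys	= example_unmap_phys,
};
#endif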

#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_OPS */
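
/*
 * Illustrative sketch (not part of the kernel): bus or platform code would
 * typically install per-device ops from its DMA-configure hook, e.g. using
 * the hypothetical ops object sketched above.
 */
#if 0	/* example only, never compiled */
static int example_bus_dma_configure(struct device *dev)
{
	set_dma_ops(dev, &example_dma_ops);
	return 0;
}
#endif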

#ifdef CONFIG_DMA_CMA
struct cma *dev_get_cma_area(struct device *dev);
struct cma *dma_contiguous_get_area_by_idx(unsigned int idx);

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline struct cma *dma_contiguous_get_area_by_idx(unsigned int idx)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
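
/*
 * Illustrative sketch (not part of the kernel): allocators typically try
 * the CMA area first and fall back to the page allocator, which also
 * matches the stubbed behaviour above when CONFIG_DMA_CMA=n. The
 * "example_*" name is made up.
 */
#if 0	/* example only, never compiled */
static struct page *example_alloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct page *page = dma_alloc_contiguous(dev, size, gfp);

	if (!page)
		page = alloc_pages(gfp, get_order(size));
	return page;
}
#endif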

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */
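
/*
 * Illustrative sketch (not part of the kernel): a platform driver with
 * device-local SRAM might register it as a coherent pool at probe time.
 * EXAMPLE_SRAM_PHYS and example_probe() are made-up names; here the device
 * is assumed to see the memory at the same address as the CPU does.
 */
#if 0	/* example only, never compiled */
static int example_probe(struct device *dev)
{
	return dma_declare_coherent_memory(dev, EXAMPLE_SRAM_PHYS,
					   EXAMPLE_SRAM_PHYS, SZ_1M);
}
#endif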

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
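
/*
 * Illustrative sketch (not part of the kernel): early architecture setup
 * code could seed the global coherent pool from a reserved region. The
 * address and size below are purely illustrative.
 */
#if 0	/* example only, never compiled */
static int __init example_init_coherent_pool(void)
{
	return dma_init_global_coherent(0x30000000, SZ_4M);
}
#endif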

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);
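
/*
 * Illustrative sketch (not part of the kernel): platform code for a bus
 * with a fixed translation could record the offset between CPU physical
 * and device addresses. The addresses and the 1 GiB window below are
 * purely illustrative.
 */
#if 0	/* example only, never compiled */
static int example_platform_dma_init(struct device *dev)
{
	/* CPU physical 0x40000000 appears to the device at 0x00000000 */
	return dma_direct_set_offset(dev, 0x40000000, 0x00000000, SZ_1G);
}
#endif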

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif

static inline void dma_reset_need_sync(struct device *dev)
{
#ifdef CONFIG_DMA_NEED_SYNC
	/* Reset it only once so that the function can be called on the hot path */
	if (unlikely(dev->dma_skip_sync))
		dev->dma_skip_sync = false;
#endif
}

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
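
/*
 * Illustrative sketch (not part of the kernel): a mapping path could use
 * the helper above to decide whether a buffer must be bounced before
 * non-coherent DMA. bounce_map() is a hypothetical fallback, not a real
 * kernel function.
 */
#if 0	/* example only, never compiled */
static dma_addr_t example_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir)
{
	if (dma_kmalloc_needs_bounce(dev, size, dir))
		return bounce_map(dev, phys, size, dir);	/* hypothetical */
	return (dma_addr_t)phys;	/* illustrative identity mapping */
}
#endif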

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask) do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
#endif
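
/*
 * Illustrative sketch (not part of the kernel): an architecture whose
 * uncached mappings permit write combining might relax the default, e.g.
 * from its asm headers. Whether this is correct is architecture-specific.
 */
#if 0	/* example only, never compiled */
#define pgprot_dmacoherent(prot) pgprot_writecombine(prot)
#endif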

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot; /* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifndef CONFIG_ARCH_HAS_BATCHED_DMA_SYNC
static inline void arch_sync_dma_flush(void)
{
}
#endif

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_alloc_direct(struct device *dev);
bool arch_dma_free_direct(struct device *dev, dma_addr_t dma_handle);
#else
#define arch_dma_map_phys_direct(d, a) (false)
#define arch_dma_unmap_phys_direct(d, a) (false)
#define arch_dma_map_sg_direct(d, s, n) (false)
#define arch_dma_unmap_sg_direct(d, s, n) (false)
#define arch_dma_alloc_direct(d) (false)
#define arch_dma_free_direct(d, a) (false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;
#endif /* _LINUX_DMA_MAP_OPS_H */