/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/cache.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)

/*
 * DMA_ATTR_MMIO - Indicates a memory-mapped I/O (MMIO) region for DMA mapping
 *
 * This attribute indicates the physical address is not normal system
 * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
 * functions, it may not be cacheable, and access using CPU load/store
 * instructions may not be allowed.
 *
 * Usually this will be used to describe MMIO addresses, or other non-cacheable
 * register addresses. When DMA mapping this sort of address we call
 * the operation Peer to Peer as one device is DMA'ing to another device.
 * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
 * is appropriate.
 *
 * For architectures that require cache flushing for DMA coherence,
 * DMA_ATTR_MMIO will not perform any cache flushing. The address
 * provided must never be mapped cacheable into the CPU.
 */
#define DMA_ATTR_MMIO (1UL << 10)

/*
 * DMA_ATTR_CPU_CACHE_CLEAN: Indicates the CPU will not dirty any cacheline
 * overlapping this buffer while it is mapped for DMA. All mappings sharing
 * a cacheline must have this attribute for this to be considered safe.
 */
#define DMA_ATTR_CPU_CACHE_CLEAN (1UL << 11)
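
/*
 * Example (illustrative sketch only; 'dev', 'size' and 'dma_handle' are
 * hypothetical driver locals): the attributes above are OR'ed into a bitmask
 * and passed through the 'attrs' argument of the *_attrs() helpers declared
 * below, e.g.:
 *
 *	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				    DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, size, buf, dma_handle,
 *		       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */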

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) GENMASK_ULL((n) - 1, 0)

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
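
/*
 * Example (illustrative sketch; 'dev', 'buf' and 'len' are hypothetical):
 * a returned handle must be checked with dma_mapping_error() rather than
 * compared against DMA_MAPPING_ERROR directly:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */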

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfer uses the dma_iova_*() calls or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
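
/*
 * Rough sketch of a calling sequence (illustrative only; error handling is
 * trimmed and 'dev', 'phys', 'size', 'dir' and 'ret' are hypothetical
 * caller state):
 *
 *	struct dma_iova_state state = {};
 *
 *	if (dma_iova_try_alloc(dev, &state, phys, size)) {
 *		ret = dma_iova_link(dev, &state, phys, 0, size, dir, 0);
 *		if (!ret)
 *			ret = dma_iova_sync(dev, &state, 0, size);
 *		...
 *		dma_iova_destroy(dev, &state, size, dir, 0);
 *	}
 *
 * When dma_iova_try_alloc() returns false the caller falls back to the
 * regular dma_map_phys()/dma_map_page_attrs() path (see dma_use_iova()).
 */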
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
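
/*
 * Example (illustrative sketch; 'dev', 'dma', 'buf', 'len' and process()
 * are hypothetical): for streaming mappings that the CPU touches while they
 * stay mapped, ownership is bounced with the sync helpers above:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	process(buf, len);	// CPU may safely read the data here
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */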

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
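
/*
 * Example (illustrative sketch; 'dev', 'size' and 'dma_handle' are
 * hypothetical): memory from dma_alloc_noncoherent() is not guaranteed to be
 * cache coherent, so the caller brackets transfers with the sync helpers:
 *
 *	void *vaddr = dma_alloc_noncoherent(dev, size, &dma_handle,
 *					    DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	...
 *	dma_sync_single_for_device(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 *	...
 *	dma_free_noncoherent(dev, size, vaddr, dma_handle, DMA_BIDIRECTIONAL);
 */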

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
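
/*
 * Example (illustrative sketch; 'dev' and 'sgt' are hypothetical and the
 * table is assumed to be already populated with pages): a typical sg_table
 * cycle built on the helpers above:
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
 *	...	// CPU updates the buffer
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */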

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
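
/*
 * Example (illustrative sketch; 'struct my_desc', 'dev', 'ring_size' and
 * 'ring_dma' are hypothetical): coherent memory is typically used for
 * descriptor rings that both CPU and device access concurrently, with no
 * explicit sync calls required:
 *
 *	struct my_desc *ring = dma_alloc_coherent(dev, ring_size,
 *						   &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */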

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as, or smaller than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
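
/*
 * Example (illustrative sketch of typical probe() usage): request the widest
 * mask the device supports and fall back to 32 bits if the platform rejects
 * it:
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */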

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
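
/*
 * Worked example (illustrative): with a segment boundary mask of 0xffffffff
 * (4 GiB - 1) and an IOMMU page shift of 12 (4 KiB pages), the helper above
 * returns (0xffffffff >> 12) + 1 = 0x100000 pages per boundary segment.
 */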

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

#ifdef ARCH_HAS_DMA_MINALIGN
#define ____dma_from_device_aligned __aligned(ARCH_DMA_MINALIGN)
#else
#define ____dma_from_device_aligned
#endif
/* Mark start of DMA buffer */
#define __dma_from_device_group_begin(GROUP) \
	__cacheline_group_begin(GROUP) ____dma_from_device_aligned
/* Mark end of DMA buffer */
#define __dma_from_device_group_end(GROUP) \
	__cacheline_group_end(GROUP) ____dma_from_device_aligned

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
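
/*
 * Example (illustrative sketch; 'struct my_buf', 'buf', 'dev', 'dma', 'size'
 * and 'dir' are hypothetical): the DEFINE_DMA_UNMAP_* helpers let drivers
 * store unmap information only when CONFIG_NEED_DMA_MAP_STATE is set:
 *
 *	struct my_buf {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), dir);
 */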

#endif /* _LINUX_DMA_MAPPING_H */