Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/dma-mapping.h at commit a97c88a176b6b8d116f4d3f508f3bd02bc77b462 (787 lines, 27 kB)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/cache.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * DMA_ATTR_MMIO - Indicates memory-mapped I/O (MMIO) region for DMA mapping
 *
 * This attribute indicates the physical address is not normal system
 * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
 * functions, it may not be cacheable, and access using CPU load/store
 * instructions may not be allowed.
 *
 * Usually this will be used to describe MMIO addresses, or other non-cacheable
 * register addresses. When DMA mapping this sort of address we call
 * the operation Peer to Peer, as one device is DMA'ing to another device.
 * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
 * is appropriate.
 *
 * For architectures that require cache flushing for DMA coherence
 * DMA_ATTR_MMIO will not perform any cache flushing. The address
 * provided must never be mapped cacheable into the CPU.
 */
#define DMA_ATTR_MMIO		(1UL << 10)

/*
 * DMA_ATTR_DEBUGGING_IGNORE_CACHELINES: Indicates the CPU cache line can be
 * overlapped. All mappings sharing a cacheline must have this attribute for
 * this to be considered safe.
 */
#define DMA_ATTR_DEBUGGING_IGNORE_CACHELINES	(1UL << 11)

/*
 * DMA_ATTR_REQUIRE_COHERENT: Indicates that DMA coherency is required.
 * All mappings that carry this attribute can't work with SWIOTLB and cache
 * flushing.
 */
#define DMA_ATTR_REQUIRE_COHERENT	(1UL << 12)
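
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * attributes are OR'ed together into the 'attrs' argument of the *_attrs
 * allocation and mapping helpers declared below; matching attributes are
 * passed again when freeing. "dev" and "size" are assumed driver context.
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *			      DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, size, buf, handle, DMA_ATTR_NO_KERNEL_MAPPING);
 */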

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB		(1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
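
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * a streaming mapping of one page, checked with dma_mapping_error() as the
 * DMA_MAPPING_ERROR comment above requires. "dev" and "page" are assumed
 * driver context.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *				  DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... start the device transfer ...
 *	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */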
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfers use the dma_iova_*() calls or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */
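
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * the IOVA API allocates one contiguous IOVA range, links physical ranges
 * into it and syncs the IOTLB before use; dma_iova_destroy() unlinks and
 * frees in one call. "dev", "phys" and "size" are assumed driver context,
 * and the non-IOVA fallback path is elided.
 *
 *	struct dma_iova_state state = {};
 *	int ret;
 *
 *	if (!dma_iova_try_alloc(dev, &state, phys, size))
 *		... fall back to dma_map_phys() ...;
 *	ret = dma_iova_link(dev, &state, phys, 0, size, DMA_TO_DEVICE, 0);
 *	if (!ret)
 *		ret = dma_iova_sync(dev, &state, 0, size);
 *	...
 *	dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);
 */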

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
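
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * reusing a streaming mapping across transfers; ownership moves to the CPU
 * for inspection and back to the device before the next transfer. "dev",
 * "addr" and "size" are assumed driver context.
 *
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */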

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
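
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * a non-coherent allocation must be synced explicitly, here before handing
 * the buffer to the device. "dev" and "size" are assumed driver context.
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_noncoherent(dev, size, &handle, DMA_TO_DEVICE,
 *				    GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... fill buf ...
 *	dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);
 *	... start the transfer ...
 *	dma_free_noncoherent(dev, size, buf, handle, DMA_TO_DEVICE);
 */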

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
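
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * mapping a scatter-gather table and tearing it down again. "dev" and "sgt"
 * are assumed driver context.
 *
 *	int ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	... program the device with the DMA addresses in sgt ...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */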

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
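
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * typical probe-time addressing setup, trying 64-bit DMA first and falling
 * back to 32-bit. "dev" is assumed driver context.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */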

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

#ifdef ARCH_HAS_DMA_MINALIGN
#define ____dma_from_device_aligned __aligned(ARCH_DMA_MINALIGN)
#else
#define ____dma_from_device_aligned
#endif
/* Mark start of DMA buffer */
#define __dma_from_device_group_begin(GROUP) \
	__cacheline_group_begin(GROUP) ____dma_from_device_aligned
/* Mark end of DMA buffer */
#define __dma_from_device_group_end(GROUP) \
	__cacheline_group_end(GROUP) ____dma_from_device_aligned

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */
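
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * the DEFINE_DMA_UNMAP_* macros let a driver carry unmap state only when
 * CONFIG_NEED_DMA_MAP_STATE is set, compiling away to nothing otherwise.
 * "struct my_ring_entry" and "entry" are hypothetical driver context.
 *
 *	struct my_ring_entry {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(entry, addr, mapping);
 *	dma_unmap_len_set(entry, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(entry, addr),
 *			 dma_unmap_len(entry, len), DMA_TO_DEVICE);
 */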