Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'dmaengine-fix-4.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"Some fixes have piled up, so time to send them upstream.

These fixes include:
- at_xdmac fixes for residue and other stuff
- update MAINTAINERS for dma dt bindings
- mv_xor fix for incorrect offset"

* tag 'dmaengine-fix-4.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: mv_xor: Fix incorrect offset in dma_map_page()
dmaengine: at_xdmac: double FIFO flush needed to compute residue
dmaengine: at_xdmac: fix residue corruption
dmaengine: at_xdmac: align descriptors on 64 bits
MAINTAINERS: Add file patterns for dma device tree bindings

+64 -29
+1
MAINTAINERS
··· 3778 3778 S: Maintained 3779 3779 F: drivers/dma/ 3780 3780 F: include/linux/dmaengine.h 3781 + F: Documentation/devicetree/bindings/dma/ 3781 3782 F: Documentation/dmaengine/ 3782 3783 T: git git://git.infradead.org/users/vkoul/slave-dma.git 3783 3784
+57 -25
drivers/dma/at_xdmac.c
··· 242 242 u32 mbr_dus; /* Destination Microblock Stride Register */ 243 243 }; 244 244 245 - 245 + /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ 246 246 struct at_xdmac_desc { 247 247 struct at_xdmac_lld lld; 248 248 enum dma_transfer_direction direction; ··· 253 253 unsigned int xfer_size; 254 254 struct list_head descs_list; 255 255 struct list_head xfer_node; 256 - }; 256 + } __aligned(sizeof(u64)); 257 257 258 258 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) 259 259 { ··· 1400 1400 u32 cur_nda, check_nda, cur_ubc, mask, value; 1401 1401 u8 dwidth = 0; 1402 1402 unsigned long flags; 1403 + bool initd; 1403 1404 1404 1405 ret = dma_cookie_status(chan, cookie, txstate); 1405 1406 if (ret == DMA_COMPLETE) ··· 1425 1424 residue = desc->xfer_size; 1426 1425 /* 1427 1426 * Flush FIFO: only relevant when the transfer is source peripheral 1428 - * synchronized. 1427 + * synchronized. Flush is needed before reading CUBC because data in 1428 + * the FIFO are not reported by CUBC. Reporting a residue of the 1429 + * transfer length while we have data in FIFO can cause issue. 1430 + * Usecase: atmel USART has a timeout which means I have received 1431 + * characters but there is no more character received for a while. On 1432 + * timeout, it requests the residue. If the data are in the DMA FIFO, 1433 + * we will return a residue of the transfer length. It means no data 1434 + * received. If an application is waiting for these data, it will hang 1435 + * since we won't have another USART timeout without receiving new 1436 + * data. 1429 1437 */ 1430 1438 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; 1431 1439 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; ··· 1445 1435 } 1446 1436 1447 1437 /* 1448 - * When processing the residue, we need to read two registers but we 1449 - * can't do it in an atomic way. 
AT_XDMAC_CNDA is used to find where 1450 - * we stand in the descriptor list and AT_XDMAC_CUBC is used 1451 - * to know how many data are remaining for the current descriptor. 1452 - * Since the dma channel is not paused to not loose data, between the 1453 - * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of 1454 - * descriptor. 1455 - * For that reason, after reading AT_XDMAC_CUBC, we check if we are 1456 - * still using the same descriptor by reading a second time 1457 - * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to 1458 - * read again AT_XDMAC_CUBC. 1438 + * The easiest way to compute the residue should be to pause the DMA 1439 + * but doing this can lead to miss some data as some devices don't 1440 + * have FIFO. 1441 + * We need to read several registers because: 1442 + * - DMA is running therefore a descriptor change is possible while 1443 + * reading these registers 1444 + * - When the block transfer is done, the value of the CUBC register 1445 + * is set to its initial value until the fetch of the next descriptor. 1446 + * This value will corrupt the residue calculation so we have to skip 1447 + * it. 1448 + * 1449 + * INITD -------- ------------ 1450 + * |____________________| 1451 + * _______________________ _______________ 1452 + * NDA @desc2 \/ @desc3 1453 + * _______________________/\_______________ 1454 + * __________ ___________ _______________ 1455 + * CUBC 0 \/ MAX desc1 \/ MAX desc2 1456 + * __________/\___________/\_______________ 1457 + * 1458 + * Since descriptors are aligned on 64 bits, we can assume that 1459 + * the update of NDA and CUBC is atomic. 1459 1460 * Memory barriers are used to ensure the read order of the registers. 1460 - * A max number of retries is set because unlikely it can never ends if 1461 - * we are transferring a lot of data with small buffers. 1461 + * A max number of retries is set because unlikely it could never ends. 
1462 1462 */ 1463 - cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1464 - rmb(); 1465 - cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); 1466 1463 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { 1467 - rmb(); 1468 1464 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1469 - 1470 - if (likely(cur_nda == check_nda)) 1471 - break; 1472 - 1473 - cur_nda = check_nda; 1465 + rmb(); 1466 + initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); 1474 1467 rmb(); 1475 1468 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); 1469 + rmb(); 1470 + cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1471 + rmb(); 1472 + 1473 + if ((check_nda == cur_nda) && initd) 1474 + break; 1476 1475 } 1477 1476 1478 1477 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { 1479 1478 ret = DMA_ERROR; 1480 1479 goto spin_unlock; 1480 + } 1481 + 1482 + /* 1483 + * Flush FIFO: only relevant when the transfer is source peripheral 1484 + * synchronized. Another flush is needed here because CUBC is updated 1485 + * when the controller sends the data write command. It can lead to 1486 + * report data that are not written in the memory or the device. The 1487 + * FIFO flush ensures that data are really written. 1488 + */ 1489 + if ((desc->lld.mbr_cfg & mask) == value) { 1490 + at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); 1491 + while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) 1492 + cpu_relax(); 1481 1493 } 1482 1494 1483 1495 /*
+6 -4
drivers/dma/mv_xor.c
··· 703 703 goto free_resources; 704 704 } 705 705 706 - src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, 707 - PAGE_SIZE, DMA_TO_DEVICE); 706 + src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 707 + (size_t)src & ~PAGE_MASK, PAGE_SIZE, 708 + DMA_TO_DEVICE); 708 709 unmap->addr[0] = src_dma; 709 710 710 711 ret = dma_mapping_error(dma_chan->device->dev, src_dma); ··· 715 714 } 716 715 unmap->to_cnt = 1; 717 716 718 - dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, 719 - PAGE_SIZE, DMA_FROM_DEVICE); 717 + dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 718 + (size_t)dest & ~PAGE_MASK, PAGE_SIZE, 719 + DMA_FROM_DEVICE); 720 720 unmap->addr[1] = dest_dma; 721 721 722 722 ret = dma_mapping_error(dma_chan->device->dev, dest_dma);