Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'dmaengine-fix-4.5-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"A few fixes for drivers, nothing major here.

Fixes are: ioatdma fix to restart channels, new ID for wildcat PCH,
residue fix for edma, disable irq for non-cyclic in dw"

* tag 'dmaengine-fix-4.5-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: dw: disable BLOCK IRQs for non-cyclic xfer
dmaengine: edma: fix residue race for cyclic
dmaengine: dw: pci: add ID for WildcatPoint PCH
dmaengine: IOATDMA: fix timer code that continues to restart channels during idle

+76 -18
+10 -5
drivers/dma/dw/core.c
··· 156 156 157 157 /* Enable interrupts */ 158 158 channel_set_bit(dw, MASK.XFER, dwc->mask); 159 - channel_set_bit(dw, MASK.BLOCK, dwc->mask); 160 159 channel_set_bit(dw, MASK.ERROR, dwc->mask); 161 160 162 161 dwc->initialized = true; ··· 587 588 588 589 spin_unlock_irqrestore(&dwc->lock, flags); 589 590 } 591 + 592 + /* Re-enable interrupts */ 593 + channel_set_bit(dw, MASK.BLOCK, dwc->mask); 590 594 } 591 595 592 596 /* ------------------------------------------------------------------------- */ ··· 620 618 dwc_scan_descriptors(dw, dwc); 621 619 } 622 620 623 - /* 624 - * Re-enable interrupts. 625 - */ 621 + /* Re-enable interrupts */ 626 622 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 627 - channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); 628 623 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 629 624 } 630 625 ··· 1260 1261 int dw_dma_cyclic_start(struct dma_chan *chan) 1261 1262 { 1262 1263 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1264 + struct dw_dma *dw = to_dw_dma(chan->device); 1263 1265 unsigned long flags; 1264 1266 1265 1267 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { ··· 1269 1269 } 1270 1270 1271 1271 spin_lock_irqsave(&dwc->lock, flags); 1272 + 1273 + /* Enable interrupts to perform cyclic transfer */ 1274 + channel_set_bit(dw, MASK.BLOCK, dwc->mask); 1275 + 1272 1276 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1277 + 1273 1278 spin_unlock_irqrestore(&dwc->lock, flags); 1274 1279 1275 1280 return 0;
+4
drivers/dma/dw/pci.c
··· 108 108 109 109 /* Haswell */ 110 110 { PCI_VDEVICE(INTEL, 0x9c60) }, 111 + 112 + /* Broadwell */ 113 + { PCI_VDEVICE(INTEL, 0x9ce0) }, 114 + 111 115 { } 112 116 }; 113 117 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
+40 -1
drivers/dma/edma.c
··· 113 113 #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 114 114 #define CHMAP_EXIST BIT(24) 115 115 116 + /* CCSTAT register */ 117 + #define EDMA_CCSTAT_ACTV BIT(4) 118 + 116 119 /* 117 120 * Max of 20 segments per channel to conserve PaRAM slots 118 121 * Also note that MAX_NR_SG should be atleast the no.of periods ··· 1683 1680 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1684 1681 } 1685 1682 1683 + /* 1684 + * This limit exists to avoid a possible infinite loop when waiting for proof 1685 + * that a particular transfer is completed. This limit can be hit if there 1686 + * are large bursts to/from slow devices or the CPU is never able to catch 1687 + * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART 1688 + * RX-FIFO, as many as 55 loops have been seen. 1689 + */ 1690 + #define EDMA_MAX_TR_WAIT_LOOPS 1000 1691 + 1686 1692 static u32 edma_residue(struct edma_desc *edesc) 1687 1693 { 1688 1694 bool dst = edesc->direction == DMA_DEV_TO_MEM; 1695 + int loop_count = EDMA_MAX_TR_WAIT_LOOPS; 1696 + struct edma_chan *echan = edesc->echan; 1689 1697 struct edma_pset *pset = edesc->pset; 1690 1698 dma_addr_t done, pos; 1691 1699 int i; ··· 1705 1691 * We always read the dst/src position from the first RamPar 1706 1692 * pset. That's the one which is active now. 1707 1693 */ 1708 - pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); 1694 + pos = edma_get_position(echan->ecc, echan->slot[0], dst); 1695 + 1696 + /* 1697 + * "pos" may represent a transfer request that is still being 1698 + * processed by the EDMACC or EDMATC. We will busy wait until 1699 + * any one of the situations occurs: 1700 + * 1. the DMA hardware is idle 1701 + * 2. a new transfer request is setup 1702 + * 3. 
we hit the loop limit 1703 + */ 1704 + while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) { 1705 + /* check if a new transfer request is setup */ 1706 + if (edma_get_position(echan->ecc, 1707 + echan->slot[0], dst) != pos) { 1708 + break; 1709 + } 1710 + 1711 + if (!--loop_count) { 1712 + dev_dbg_ratelimited(echan->vchan.chan.device->dev, 1713 + "%s: timeout waiting for PaRAM update\n", 1714 + __func__); 1715 + break; 1716 + } 1717 + 1718 + cpu_relax(); 1719 + } 1709 1720 1710 1721 /* 1711 1722 * Cyclic is simple. Just subtract pset[0].addr from pos.
+22 -12
drivers/dma/ioat/dma.c
··· 861 861 return; 862 862 } 863 863 864 + spin_lock_bh(&ioat_chan->cleanup_lock); 865 + 866 + /* handle the no-actives case */ 867 + if (!ioat_ring_active(ioat_chan)) { 868 + spin_lock_bh(&ioat_chan->prep_lock); 869 + check_active(ioat_chan); 870 + spin_unlock_bh(&ioat_chan->prep_lock); 871 + spin_unlock_bh(&ioat_chan->cleanup_lock); 872 + return; 873 + } 874 + 864 875 /* if we haven't made progress and we have already 865 876 * acknowledged a pending completion once, then be more 866 877 * forceful with a restart 867 878 */ 868 - spin_lock_bh(&ioat_chan->cleanup_lock); 869 879 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 870 880 __cleanup(ioat_chan, phys_complete); 871 881 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { 882 + u32 chanerr; 883 + 884 + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); 885 + dev_warn(to_dev(ioat_chan), "Restarting channel...\n"); 886 + dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n", 887 + status, chanerr); 888 + dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n", 889 + ioat_ring_active(ioat_chan)); 890 + 872 891 spin_lock_bh(&ioat_chan->prep_lock); 873 892 ioat_restart_channel(ioat_chan); 874 893 spin_unlock_bh(&ioat_chan->prep_lock); 875 894 spin_unlock_bh(&ioat_chan->cleanup_lock); 876 895 return; 877 - } else { 896 + } else 878 897 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); 879 - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); 880 - } 881 898 882 - 883 - if (ioat_ring_active(ioat_chan)) 884 - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); 885 - else { 886 - spin_lock_bh(&ioat_chan->prep_lock); 887 - check_active(ioat_chan); 888 - spin_unlock_bh(&ioat_chan->prep_lock); 889 - } 899 + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); 890 900 spin_unlock_bh(&ioat_chan->cleanup_lock); 891 901 } 892 902