Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'dmaengine-fix-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
"A bunch of driver fixes with idxd ones being the biggest:

- Xilinx regmap init error handling, dma_device directions, residue
calculation, and reset-related timeout fixes

- Renesas CHCTRL updates and driver list fixes

- DW HDMA cycle bits and MSI data programming fixes

- IDXD pile of memory leak and FLR fixes"

* tag 'dmaengine-fix-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (21 commits)
dmaengine: xilinx_dma: Fix reset related timeout with two-channel AXIDMA
dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
dmaengine: xilinx: xilinx_dma: Fix dma_device directions
dmaengine: sh: rz-dmac: Move CHCTRL updates under spinlock
dmaengine: sh: rz-dmac: Protect the driver specific lists
dmaengine: idxd: fix possible wrong descriptor completion in llist_abort_desc()
dmaengine: xilinx: xdma: Fix regmap init error handling
dmaengine: dw-edma: Fix multiple times setting of the CYCLE_STATE and CYCLE_BIT bits for HDMA.
dmaengine: idxd: Fix leaking event log memory
dmaengine: idxd: Fix freeing the allocated ida too late
dmaengine: idxd: Fix memory leak when a wq is reset
dmaengine: idxd: Fix not releasing workqueue on .release()
dmaengine: idxd: Wait for submitted operations on .device_synchronize()
dmaengine: idxd: Flush all pending descriptors
dmaengine: idxd: Flush kernel workqueues on Function Level Reset
dmaengine: idxd: Fix possible invalid memory access after FLR
dmaengine: idxd: Fix crash when the event log is disabled
dmaengine: idxd: Fix lockdep warnings when calling idxd_device_config()
dmaengine: dw-edma: fix MSI data programming for multi-IRQ case
...

+168 -95
+6 -2
drivers/dma/dw-edma/dw-edma-core.c
···
 {
 	struct dw_edma_chip *chip = dw->chip;
 	struct device *dev = dw->chip->dev;
+	struct msi_desc *msi_desc;
 	u32 wr_mask = 1;
 	u32 rd_mask = 1;
 	int i, err = 0;
···
 					 &dw->irq[i]);
 		if (err)
 			goto err_irq_free;
-
-		if (irq_get_msi_desc(irq))
+		msi_desc = irq_get_msi_desc(irq);
+		if (msi_desc) {
 			get_cached_msi_msg(irq, &dw->irq[i].msi);
+			if (!msi_desc->pci.msi_attrib.is_msix)
+				dw->irq[i].msi.data = dw->irq[0].msi.data + i;
+		}
 	}
 
 	dw->nr_irqs = i;
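A note on the MSI fix above: with plain (non-MSI-X) MSI, a device is granted a block of vectors that share one base message, and vector i is signalled by writing base data + i; only MSI-X gives every vector its own message. The snippet below is a minimal userspace model of that offset rule, with invented names — it is not the driver code:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct msi_msg; only the data payload matters here. */
struct msi_msg { uint32_t data; };

/* With plain MSI, vector i must use (base data + i); MSI-X needs no fixup. */
static void fixup_msi_data(struct msi_msg *msgs, int nvec, int is_msix)
{
	for (int i = 1; i < nvec; i++)
		if (!is_msix)
			msgs[i].data = msgs[0].data + i;
}

int main(void)
{
	struct msi_msg msgs[4] = { { .data = 0x40 } };

	fixup_msi_data(msgs, 4, 0);
	for (int i = 0; i < 4; i++)
		printf("vector %d -> data 0x%x\n", i, (unsigned int)msgs[i].data);
	return 0;
}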
+3 -3
drivers/dma/dw-edma/dw-hdma-v0-core.c
···
 			  lower_32_bits(chunk->ll_region.paddr));
 		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
 			  upper_32_bits(chunk->ll_region.paddr));
+		/* Set consumer cycle */
+		SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
+			  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
 	}
-	/* Set consumer cycle */
-	SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
-		  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
 
 	dw_hdma_v0_sync_ll_data(chunk);
 
+11 -15
drivers/dma/fsl-edma-main.c
···
 			return NULL;
 		i = fsl_chan - fsl_edma->chans;
 
-		fsl_chan->priority = dma_spec->args[1];
-		fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
-		fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
-		fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
+		if (!b_chmux && i != dma_spec->args[0])
+			continue;
 
 		if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
 			continue;
···
 		if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
 			continue;
 
-		if (!b_chmux && i == dma_spec->args[0]) {
-			chan = dma_get_slave_channel(chan);
-			chan->device->privatecnt++;
-			return chan;
-		} else if (b_chmux && !fsl_chan->srcid) {
-			/* if controller support channel mux, choose a free channel */
-			chan = dma_get_slave_channel(chan);
-			chan->device->privatecnt++;
-			fsl_chan->srcid = dma_spec->args[0];
-			return chan;
-		}
+		fsl_chan->srcid = dma_spec->args[0];
+		fsl_chan->priority = dma_spec->args[1];
+		fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
+		fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
+		fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
+
+		chan = dma_get_slave_channel(chan);
+		chan->device->privatecnt++;
+		return chan;
 	}
 	return NULL;
 }
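The reordering above follows a check-then-commit pattern: every rejection test runs before any candidate state is written, so channels skipped with continue are no longer clobbered with another request's priority and flags. A minimal sketch of the pattern, with invented types and names:

#include <stdio.h>

struct chan { int id; int priority; int busy; };

static struct chan *xlate(struct chan *chans, int n, int want_id, int prio)
{
	for (int i = 0; i < n; i++) {
		struct chan *c = &chans[i];

		/* All filters first ... */
		if (c->id != want_id)
			continue;
		if (c->busy)
			continue;

		/* ... then commit configuration to the winner only. */
		c->priority = prio;
		return c;
	}
	return NULL;
}

int main(void)
{
	struct chan chans[3] = { { 0, 0, 0 }, { 1, 0, 1 }, { 2, 0, 0 } };
	struct chan *c = xlate(chans, 3, 2, 7);

	printf("got channel %d prio %d; chan0 prio untouched: %d\n",
	       c->id, c->priority, chans[0].priority);
	return 0;
}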
+4 -4
drivers/dma/idxd/cdev.c
···
 static void idxd_cdev_dev_release(struct device *dev)
 {
 	struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
-	struct idxd_cdev_context *cdev_ctx;
-	struct idxd_wq *wq = idxd_cdev->wq;
 
-	cdev_ctx = &ictx[wq->idxd->data->type];
-	ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
 	kfree(idxd_cdev);
 }
···
 
 void idxd_wq_del_cdev(struct idxd_wq *wq)
 {
+	struct idxd_cdev_context *cdev_ctx;
 	struct idxd_cdev *idxd_cdev;
 
 	idxd_cdev = wq->idxd_cdev;
 	wq->idxd_cdev = NULL;
 	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
+
+	cdev_ctx = &ictx[wq->idxd->data->type];
+	ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
 	put_device(cdev_dev(idxd_cdev));
 }
 
+31 -14
drivers/dma/idxd/device.c
···
 	free_descs(wq);
 	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 	sbitmap_queue_free(&wq->sbq);
+	wq->type = IDXD_WQT_NONE;
 }
 EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, "IDXD");
 
···
 	lockdep_assert_held(&wq->wq_lock);
 	wq->state = IDXD_WQ_DISABLED;
 	memset(wq->wqcfg, 0, idxd->wqcfg_size);
-	wq->type = IDXD_WQT_NONE;
 	wq->threshold = 0;
 	wq->priority = 0;
 	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
···
 	struct device *dev = &idxd->pdev->dev;
 	struct idxd_evl *evl = idxd->evl;
 
-	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
-	if (!gencfg.evl_en)
+	if (!evl)
 		return;
 
 	mutex_lock(&evl->lock);
···
 {
 	int rc;
 
-	lockdep_assert_held(&idxd->dev_lock);
+	guard(spinlock)(&idxd->dev_lock);
+
+	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+		return 0;
+
 	rc = idxd_wqs_setup(idxd);
 	if (rc < 0)
 		return rc;
···
 
 	free_irq(ie->vector, ie);
 	idxd_flush_pending_descs(ie);
+
+	/* The interrupt might have been already released by FLR */
+	if (ie->int_handle == INVALID_INT_HANDLE)
+		return;
+
 	if (idxd->request_int_handles)
 		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
 	idxd_device_clear_perm_entry(idxd, ie);
 	ie->vector = -1;
 	ie->int_handle = INVALID_INT_HANDLE;
 	ie->pasid = IOMMU_PASID_INVALID;
+}
+
+void idxd_wq_flush_descs(struct idxd_wq *wq)
+{
+	struct idxd_irq_entry *ie = &wq->ie;
+	struct idxd_device *idxd = wq->idxd;
+
+	guard(mutex)(&wq->wq_lock);
+
+	if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL)
+		return;
+
+	idxd_flush_pending_descs(ie);
+	if (idxd->request_int_handles)
+		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->int_handle = INVALID_INT_HANDLE;
 }
 
 int idxd_wq_request_irq(struct idxd_wq *wq)
···
 		}
 	}
 
-	rc = 0;
-	spin_lock(&idxd->dev_lock);
-	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
-		rc = idxd_device_config(idxd);
-	spin_unlock(&idxd->dev_lock);
+	rc = idxd_device_config(idxd);
 	if (rc < 0) {
 		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
 		goto err;
···
 	idxd_wq_reset(wq);
 	idxd_wq_free_resources(wq);
 	percpu_ref_exit(&wq->wq_active);
-	wq->type = IDXD_WQT_NONE;
 	wq->client_count = 0;
 }
 EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, "IDXD");
···
 	}
 
 	/* Device configuration */
-	spin_lock(&idxd->dev_lock);
-	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
-		rc = idxd_device_config(idxd);
-	spin_unlock(&idxd->dev_lock);
+	rc = idxd_device_config(idxd);
 	if (rc < 0)
 		return -ENXIO;
 
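The guard(spinlock)(&idxd->dev_lock) above comes from the kernel's <linux/cleanup.h>: the lock is released automatically on every exit path, and moving both the locking and the IDXD_FLAG_CONFIGURABLE test inside idxd_device_config() means callers can no longer get either wrong. Below is a minimal userspace model of the same scope-based unlock, built on the compiler cleanup attribute that the kernel helper also uses; the names are invented and a pthread mutex stands in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m) { pthread_mutex_unlock(*m); }

/* Lock now, and unlock automatically when the enclosing scope is left. */
#define guard_mutex(m) \
	pthread_mutex_t *guard_var __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(m), (m))

static int device_config(int configurable)
{
	guard_mutex(&dev_lock);

	if (!configurable)	/* early return: no unlock to forget */
		return 0;
	printf("writing configuration\n");
	return 0;
}

int main(void)
{
	device_config(0);
	device_config(1);
	return 0;
}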
+18
drivers/dma/idxd/dma.c
···
 	kfree(idxd_dma);
 }
 
+static int idxd_dma_terminate_all(struct dma_chan *c)
+{
+	struct idxd_wq *wq = to_idxd_wq(c);
+
+	idxd_wq_flush_descs(wq);
+
+	return 0;
+}
+
+static void idxd_dma_synchronize(struct dma_chan *c)
+{
+	struct idxd_wq *wq = to_idxd_wq(c);
+
+	idxd_wq_drain(wq);
+}
+
 int idxd_register_dma_device(struct idxd_device *idxd)
 {
 	struct idxd_dma_dev *idxd_dma;
···
 	dma->device_issue_pending = idxd_dma_issue_pending;
 	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+	dma->device_terminate_all = idxd_dma_terminate_all;
+	dma->device_synchronize = idxd_dma_synchronize;
 
 	rc = dma_async_device_register(dma);
 	if (rc < 0) {
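For context on the two callbacks added above: in the dmaengine API, device_terminate_all aborts outstanding descriptors while device_synchronize lets the core wait until their completion callbacks have finished; clients usually reach both through dmaengine_terminate_sync(). A kernel-context sketch of the consumer side (not buildable outside the tree; the function name is invented):

#include <linux/dmaengine.h>

static void client_teardown(struct dma_chan *chan)
{
	/*
	 * dmaengine_terminate_sync() calls the driver's
	 * device_terminate_all() and then device_synchronize(), i.e.
	 * exactly the pair of callbacks idxd wires up above, so no
	 * completion callback can run after this returns.
	 */
	dmaengine_terminate_sync(chan);
	dma_release_channel(chan);
}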
+1
drivers/dma/idxd/idxd.h
···
 int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
 void idxd_wq_free_irq(struct idxd_wq *wq);
 int idxd_wq_request_irq(struct idxd_wq *wq);
+void idxd_wq_flush_descs(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+7 -7
drivers/dma/idxd/init.c
···
 
 	idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
 
-	idxd->evl->size = saved_evl->size;
+	if (idxd->evl)
+		idxd->evl->size = saved_evl->size;
 
 	for (i = 0; i < idxd->max_groups; i++) {
 		struct idxd_group *saved_group, *group;
···
 	idxd_device_config_restore(idxd, idxd->idxd_saved);
 
 	/* Re-configure IDXD device if allowed. */
-	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
-		rc = idxd_device_config(idxd);
-		if (rc < 0) {
-			dev_err(dev, "HALT: %s config fails\n", idxd_name);
-			goto out;
-		}
+	rc = idxd_device_config(idxd);
+	if (rc < 0) {
+		dev_err(dev, "HALT: %s config fails\n", idxd_name);
+		goto out;
 	}
 
 	/* Bind IDXD device to driver. */
···
 	}
 out:
 	kfree(idxd->idxd_saved);
+	idxd->idxd_saved = NULL;
 }
 
 static const struct pci_error_handlers idxd_error_handler = {
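The added idxd->idxd_saved = NULL above pairs with the kfree() so that a later halt cannot free or dereference a stale pointer; since kfree(NULL) is a no-op, the cleared pointer is safe to pass again. A minimal userspace sketch of the idiom, with free() standing in for kfree() and invented names:

#include <stdlib.h>

struct saved_state { int dummy; };

static void restore_done(struct saved_state **saved)
{
	free(*saved);		/* free(NULL) is defined as a no-op ... */
	*saved = NULL;		/* ... so a repeated call is now harmless */
}

int main(void)
{
	struct saved_state *saved = malloc(sizeof(*saved));

	restore_done(&saved);
	restore_done(&saved);	/* would be a double free without the NULL */
	return 0;
}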
+16
drivers/dma/idxd/irq.c
···
 		dev_err(&idxd->pdev->dev, "FLR failed\n");
 }
 
+static void idxd_wqs_flush_descs(struct idxd_device *idxd)
+{
+	int i;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *wq = idxd->wqs[i];
+
+		idxd_wq_flush_descs(wq);
+	}
+}
+
 static irqreturn_t idxd_halt(struct idxd_device *idxd)
 {
 	union gensts_reg gensts;
···
 	} else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
 		idxd->state = IDXD_DEV_HALTED;
 		idxd_mask_error_interrupts(idxd);
+		/* Flush all pending descriptors and disable
+		 * interrupts; they will be re-enabled when FLR
+		 * concludes.
+		 */
+		idxd_wqs_flush_descs(idxd);
 		dev_dbg(&idxd->pdev->dev,
 			"idxd halted, doing FLR. After FLR, configs are restored\n");
 		INIT_WORK(&idxd->work, idxd_device_flr);
+1 -1
drivers/dma/idxd/submit.c
···
 	 */
 	list_for_each_entry_safe(d, t, &flist, list) {
 		list_del_init(&d->list);
-		idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true,
+		idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true,
 				      NULL, NULL);
 	}
 }
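The one-word fix above is a classic list-flush bug: the loop completed `found` (a descriptor captured before the loop) on every iteration instead of the cursor `d`, so one descriptor was completed repeatedly while the rest of the aborted list never saw a completion. A trivial model of the bug class, with invented names:

#include <stdio.h>

static void complete_desc(int id) { printf("completed desc %d\n", id); }

int main(void)
{
	int flist[] = { 10, 11, 12 };	/* descriptors queued for abort */
	int found = flist[0];		/* element located before the loop */

	for (unsigned int i = 0; i < 3; i++) {
		/* Buggy form: complete_desc(found); completes desc 10 thrice. */
		complete_desc(flist[i]);	/* fixed: operate on the cursor */
	}
	(void)found;
	return 0;
}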
+1
drivers/dma/idxd/sysfs.c
···
 {
 	struct idxd_device *idxd = confdev_to_idxd(dev);
 
+	destroy_workqueue(idxd->wq);
 	kfree(idxd->groups);
 	bitmap_free(idxd->wq_enable_map);
 	kfree(idxd->wqs);
+37 -31
drivers/dma/sh/rz-dmac.c
···
  */
 
 #include <linux/bitfield.h>
+#include <linux/cleanup.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
···
 {
 	struct dma_chan *chan = &channel->vc.chan;
 	struct rz_dmac *dmac = to_rz_dmac(chan->device);
-	unsigned long flags;
 
 	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
 
-	local_irq_save(flags);
 	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
-	local_irq_restore(flags);
 }
 
 static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
···
 		if (!desc)
 			break;
 
+		/* No need to lock. This is called only for the 1st client. */
 		list_add_tail(&desc->node, &channel->ld_free);
 		channel->descs_allocated++;
 	}
···
 	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
 		__func__, channel->index, &src, &dest, len);
 
-	if (list_empty(&channel->ld_free))
-		return NULL;
+	scoped_guard(spinlock_irqsave, &channel->vc.lock) {
+		if (list_empty(&channel->ld_free))
+			return NULL;
 
-	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+		desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
 
-	desc->type = RZ_DMAC_DESC_MEMCPY;
-	desc->src = src;
-	desc->dest = dest;
-	desc->len = len;
-	desc->direction = DMA_MEM_TO_MEM;
+		desc->type = RZ_DMAC_DESC_MEMCPY;
+		desc->src = src;
+		desc->dest = dest;
+		desc->len = len;
+		desc->direction = DMA_MEM_TO_MEM;
 
-	list_move_tail(channel->ld_free.next, &channel->ld_queue);
+		list_move_tail(channel->ld_free.next, &channel->ld_queue);
+	}
+
 	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
···
 	int dma_length = 0;
 	int i = 0;
 
-	if (list_empty(&channel->ld_free))
-		return NULL;
+	scoped_guard(spinlock_irqsave, &channel->vc.lock) {
+		if (list_empty(&channel->ld_free))
+			return NULL;
 
-	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+		desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
 
-	for_each_sg(sgl, sg, sg_len, i) {
-		dma_length += sg_dma_len(sg);
+		for_each_sg(sgl, sg, sg_len, i)
+			dma_length += sg_dma_len(sg);
+
+		desc->type = RZ_DMAC_DESC_SLAVE_SG;
+		desc->sg = sgl;
+		desc->sgcount = sg_len;
+		desc->len = dma_length;
+		desc->direction = direction;
+
+		if (direction == DMA_DEV_TO_MEM)
+			desc->src = channel->src_per_address;
+		else
+			desc->dest = channel->dst_per_address;
+
+		list_move_tail(channel->ld_free.next, &channel->ld_queue);
 	}
 
-	desc->type = RZ_DMAC_DESC_SLAVE_SG;
-	desc->sg = sgl;
-	desc->sgcount = sg_len;
-	desc->len = dma_length;
-	desc->direction = direction;
-
-	if (direction == DMA_DEV_TO_MEM)
-		desc->src = channel->src_per_address;
-	else
-		desc->dest = channel->dst_per_address;
-
-	list_move_tail(channel->ld_free.next, &channel->ld_queue);
 	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
···
 	unsigned int i;
 	LIST_HEAD(head);
 
-	rz_dmac_disable_hw(channel);
 	spin_lock_irqsave(&channel->vc.lock, flags);
+	rz_dmac_disable_hw(channel);
 	for (i = 0; i < DMAC_NR_LMDESC; i++)
 		lmdesc[i].header = 0;
 
···
 	if (chstat & CHSTAT_ER) {
 		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
 			channel->index, chstat);
-		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+
+		scoped_guard(spinlock_irqsave, &channel->vc.lock)
+			rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
 		goto done;
 	}
 
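The rz-dmac hunks above adopt scoped_guard() from <linux/cleanup.h>, which holds vc.lock for exactly the braced block and drops it on any exit, including the early return NULL. For comparison, this is roughly the open-coded equivalent it replaces — a kernel-context sketch (not buildable standalone), where every early exit needs its own unlock:

unsigned long flags;

spin_lock_irqsave(&channel->vc.lock, flags);
if (list_empty(&channel->ld_free)) {
	/* each early exit must remember to unlock ... */
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	return NULL;
}
/* ... manipulate the ld_free/ld_queue lists under the lock ... */
spin_unlock_irqrestore(&channel->vc.lock, flags);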
+2 -2
drivers/dma/xilinx/xdma.c
···
 
 	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
 					   &xdma_regmap_config);
-	if (!xdev->rmap) {
-		xdma_err(xdev, "config regmap failed: %d", ret);
+	if (IS_ERR(xdev->rmap)) {
+		xdma_err(xdev, "config regmap failed: %pe", xdev->rmap);
 		goto failed;
 	}
 	INIT_LIST_HEAD(&xdev->dma_dev.channels);
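The xdma fix above matters because devm_regmap_init_mmio(), like the other regmap constructors, reports failure by encoding an errno into the returned pointer and never returns NULL, so the old !xdev->rmap test could not fire (and ret was never set by the call). A minimal userspace model of the ERR_PTR/IS_ERR convention; the fake constructor is invented for illustration:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* Errors live in the top MAX_ERRNO values of the address space. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *fake_regmap_init(int fail)
{
	static int regmap;

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)&regmap;
}

int main(void)
{
	void *map = fake_regmap_init(1);

	if (IS_ERR(map))	/* a NULL check would miss this */
		printf("regmap init failed: %ld\n", PTR_ERR(map));
	return 0;
}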
+30 -16
drivers/dma/xilinx/xilinx_dma.c
···
 					struct xilinx_cdma_tx_segment,
 					node);
 			cdma_hw = &cdma_seg->hw;
-			residue += (cdma_hw->control - cdma_hw->status) &
-				   chan->xdev->max_buffer_len;
+			residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
+				   (cdma_hw->status & chan->xdev->max_buffer_len);
 		} else if (chan->xdev->dma_config->dmatype ==
 			   XDMA_TYPE_AXIDMA) {
 			axidma_seg = list_entry(entry,
 					struct xilinx_axidma_tx_segment,
 					node);
 			axidma_hw = &axidma_seg->hw;
-			residue += (axidma_hw->control - axidma_hw->status) &
-				   chan->xdev->max_buffer_len;
+			residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
+				   (axidma_hw->status & chan->xdev->max_buffer_len);
 		} else {
 			aximcdma_seg =
 				list_entry(entry,
···
 					node);
 			aximcdma_hw = &aximcdma_seg->hw;
 			residue +=
-				(aximcdma_hw->control - aximcdma_hw->status) &
-				chan->xdev->max_buffer_len;
+				(aximcdma_hw->control & chan->xdev->max_buffer_len) -
+				(aximcdma_hw->status & chan->xdev->max_buffer_len);
 		}
 	}
 
···
 	}
 
 	dma_cookie_init(dchan);
-
-	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-		/* For AXI DMA resetting once channel will reset the
-		 * other channel as well so enable the interrupts here.
-		 */
-		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
-			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
-	}
 
 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
···
 	if (chan->err)
 		return;
 
-	if (list_empty(&chan->pending_list))
+	if (list_empty(&chan->pending_list)) {
+		if (chan->cyclic) {
+			struct xilinx_dma_tx_descriptor *desc;
+			struct list_head *entry;
+
+			desc = list_last_entry(&chan->done_list,
+					       struct xilinx_dma_tx_descriptor, node);
+			list_for_each(entry, &desc->segments) {
+				struct xilinx_axidma_tx_segment *axidma_seg;
+				struct xilinx_axidma_desc_hw *axidma_hw;
+				axidma_seg = list_entry(entry,
+						struct xilinx_axidma_tx_segment,
+						node);
+				axidma_hw = &axidma_seg->hw;
+				axidma_hw->status = 0;
+			}
+
+			list_splice_tail_init(&chan->done_list, &chan->active_list);
+			chan->desc_pendingcount = 0;
+			chan->idle = false;
+		}
 		return;
+	}
 
 	if (!chan->idle)
 		return;
···
 			   head_desc->async_tx.phys);
 	reg &= ~XILINX_DMA_CR_DELAY_MAX;
 	reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
+	reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
 	xilinx_dma_start(chan);
···
 		return -EINVAL;
 	}
 
-	xdev->common.directions |= chan->direction;
+	xdev->common.directions |= BIT(chan->direction);
 
 	/* Request the interrupt */
 	chan->irq = of_irq_get(node, chan->tdest);
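Two of the xilinx_dma hunks deserve a note. The residue hunks mask control and status down to the length field before subtracting, so only the length bits of either register ever enter the arithmetic. And the directions fix is needed because dma_device.directions is a bitmask indexed by enum dma_transfer_direction: OR-ing the raw enum value sets the wrong bit, as this small model shows (enum values as in <linux/dmaengine.h>):

#include <stdio.h>

enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
};

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int wrong = 0, right = 0;

	wrong |= DMA_DEV_TO_MEM;	/* sets 0b10 == BIT(DMA_MEM_TO_DEV) */
	right |= BIT(DMA_DEV_TO_MEM);	/* sets 0b100, the intended bit */
	printf("wrong=0x%x right=0x%x\n", wrong, right);
	return 0;
}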