Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

dmaengine: sh: rz-dmac: Protect the driver specific lists

The driver lists (ld_free, ld_queue) are used in
rz_dmac_free_chan_resources(), rz_dmac_terminate_all(),
rz_dmac_issue_pending(), and rz_dmac_irq_handler_thread(), all under
the virtual channel lock. Take the same lock in rz_dmac_prep_slave_sg()
and rz_dmac_prep_dma_memcpy() as well to avoid concurrency issues, since
these functions also check whether the lists are empty and update or
remove list entries.

Fixes: 5000d37042a6 ("dmaengine: sh: Add DMAC driver for RZ/G2L SoC")
Cc: stable@vger.kernel.org
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Link: https://patch.msgid.link/20260316133252.240348-2-claudiu.beznea.uj@bp.renesas.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Claudiu Beznea and committed by Vinod Koul.
abb863e6 e1c98661

+33 -26
+33 -26
drivers/dma/sh/rz-dmac.c
@@ drivers/dma/sh/rz-dmac.c @@
  */

 #include <linux/bitfield.h>
+#include <linux/cleanup.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
@@ rz_dmac_alloc_chan_resources @@
 		if (!desc)
 			break;

+		/* No need to lock. This is called only for the 1st client. */
 		list_add_tail(&desc->node, &channel->ld_free);
 		channel->descs_allocated++;
 	}
@@ rz_dmac_prep_dma_memcpy @@
 	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
 		__func__, channel->index, &src, &dest, len);

-	if (list_empty(&channel->ld_free))
-		return NULL;
+	scoped_guard(spinlock_irqsave, &channel->vc.lock) {
+		if (list_empty(&channel->ld_free))
+			return NULL;

-	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+		desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

-	desc->type = RZ_DMAC_DESC_MEMCPY;
-	desc->src = src;
-	desc->dest = dest;
-	desc->len = len;
-	desc->direction = DMA_MEM_TO_MEM;
+		desc->type = RZ_DMAC_DESC_MEMCPY;
+		desc->src = src;
+		desc->dest = dest;
+		desc->len = len;
+		desc->direction = DMA_MEM_TO_MEM;

-	list_move_tail(channel->ld_free.next, &channel->ld_queue);
+		list_move_tail(channel->ld_free.next, &channel->ld_queue);
+	}
+
 	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
 }
@@ rz_dmac_prep_slave_sg @@
 	int dma_length = 0;
 	int i = 0;

-	if (list_empty(&channel->ld_free))
-		return NULL;
+	scoped_guard(spinlock_irqsave, &channel->vc.lock) {
+		if (list_empty(&channel->ld_free))
+			return NULL;

-	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+		desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

-	for_each_sg(sgl, sg, sg_len, i) {
-		dma_length += sg_dma_len(sg);
+		for_each_sg(sgl, sg, sg_len, i)
+			dma_length += sg_dma_len(sg);
+
+		desc->type = RZ_DMAC_DESC_SLAVE_SG;
+		desc->sg = sgl;
+		desc->sgcount = sg_len;
+		desc->len = dma_length;
+		desc->direction = direction;
+
+		if (direction == DMA_DEV_TO_MEM)
+			desc->src = channel->src_per_address;
+		else
+			desc->dest = channel->dst_per_address;
+
+		list_move_tail(channel->ld_free.next, &channel->ld_queue);
 	}

-	desc->type = RZ_DMAC_DESC_SLAVE_SG;
-	desc->sg = sgl;
-	desc->sgcount = sg_len;
-	desc->len = dma_length;
-	desc->direction = direction;
-
-	if (direction == DMA_DEV_TO_MEM)
-		desc->src = channel->src_per_address;
-	else
-		desc->dest = channel->dst_per_address;
-
-	list_move_tail(channel->ld_free.next, &channel->ld_queue);
 	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
 }