Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

dmaengine: dw-edma: Add non-LL mode

AMD MDB IP supports Linked List (LL) mode as well as non-LL mode.
The current code does not have the mechanisms to enable the
DMA transactions using the non-LL mode. The following two cases
are added with this patch:
- For the AMD (Xilinx) only, when a valid physical base address of
the device side DDR is not configured, then the IP can still be
used in non-LL mode. For all the channels DMA transactions will
be using the non-LL mode only. This, the default non-LL mode,
is not applicable for Synopsys IP with the current code addition.

- If the default mode is LL mode, for both AMD (Xilinx) and Synopsys,
and if the user wants to use non-LL mode then the user can do so via
configuring the peripheral_config param of dma_slave_config.

Signed-off-by: Devendra K Verma <devendra.verma@amd.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Link: https://patch.msgid.link/20260318070403.1634706-3-devendra.verma@amd.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Devendra K Verma and committed by
Vinod Koul
b7560798 14eb9a1d

+143 -15
+46 -1
drivers/dma/dw-edma/dw-edma-core.c
··· 223 223 struct dma_slave_config *config) 224 224 { 225 225 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 226 + bool cfg_non_ll; 227 + int non_ll = 0; 228 + 229 + chan->non_ll = false; 230 + if (chan->dw->chip->mf == EDMA_MF_HDMA_NATIVE) { 231 + if (config->peripheral_config && 232 + config->peripheral_size != sizeof(int)) { 233 + dev_err(dchan->device->dev, 234 + "config param peripheral size mismatch\n"); 235 + return -EINVAL; 236 + } 237 + 238 + /* 239 + * When there is no valid LLP base address available then the 240 + * default DMA ops will use the non-LL mode. 241 + * 242 + * Cases where LL mode is enabled and client wants to use the 243 + * non-LL mode then also client can do so via providing the 244 + * peripheral_config param. 245 + */ 246 + cfg_non_ll = chan->dw->chip->cfg_non_ll; 247 + if (config->peripheral_config) { 248 + non_ll = *(int *)config->peripheral_config; 249 + 250 + if (cfg_non_ll && !non_ll) { 251 + dev_err(dchan->device->dev, "invalid configuration\n"); 252 + return -EINVAL; 253 + } 254 + } 255 + 256 + if (cfg_non_ll || non_ll) 257 + chan->non_ll = true; 258 + } else if (config->peripheral_config) { 259 + dev_err(dchan->device->dev, 260 + "peripheral config param applicable only for HDMA\n"); 261 + return -EINVAL; 262 + } 226 263 227 264 memcpy(&chan->config, config, sizeof(*config)); 228 265 chan->configured = true; ··· 395 358 struct dw_edma_desc *desc; 396 359 u64 src_addr, dst_addr; 397 360 size_t fsz = 0; 361 + u32 bursts_max; 398 362 u32 cnt = 0; 399 363 int i; 400 364 ··· 453 415 return NULL; 454 416 } 455 417 418 + /* 419 + * For non-LL mode, only a single burst can be handled 420 + * in a single chunk unlike LL mode where multiple bursts 421 + * can be configured in a single chunk. 422 + */ 423 + bursts_max = chan->non_ll ? 
1 : chan->ll_max; 424 + 456 425 desc = dw_edma_alloc_desc(chan); 457 426 if (unlikely(!desc)) 458 427 goto err_alloc; ··· 495 450 if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg) 496 451 break; 497 452 498 - if (chunk->bursts_alloc == chan->ll_max) { 453 + if (chunk->bursts_alloc == bursts_max) { 499 454 chunk = dw_edma_alloc_chunk(desc); 500 455 if (unlikely(!chunk)) 501 456 goto err_alloc;
+1
drivers/dma/dw-edma/dw-edma-core.h
··· 86 86 u8 configured; 87 87 88 88 struct dma_slave_config config; 89 + bool non_ll; 89 90 }; 90 91 91 92 struct dw_edma_irq {
+31 -13
drivers/dma/dw-edma/dw-edma-pcie.c
··· 295 295 pdata->devmem_phys_off = off; 296 296 } 297 297 298 + static u64 dw_edma_get_phys_addr(struct pci_dev *pdev, 299 + struct dw_edma_pcie_data *pdata, 300 + enum pci_barno bar) 301 + { 302 + if (pdev->vendor == PCI_VENDOR_ID_XILINX) 303 + return pdata->devmem_phys_off; 304 + return pci_bus_address(pdev, bar); 305 + } 306 + 298 307 static int dw_edma_pcie_probe(struct pci_dev *pdev, 299 308 const struct pci_device_id *pid) 300 309 { ··· 312 303 struct dw_edma_chip *chip; 313 304 int err, nr_irqs; 314 305 int i, mask; 306 + bool non_ll = false; 315 307 316 308 struct dw_edma_pcie_data *vsec_data __free(kfree) = 317 309 kmalloc_obj(*vsec_data); ··· 339 329 340 330 /* 341 331 * There is no valid address found for the LL memory 342 - * space on the device side. 332 + * space on the device side. In the absence of LL base 333 + * address use the non-LL mode or simple mode supported by 334 + * the HDMA IP. 343 335 */ 344 336 if (vsec_data->devmem_phys_off == DW_PCIE_XILINX_MDB_INVALID_ADDR) 345 - return -ENOMEM; 337 + non_ll = true; 346 338 347 339 /* 348 340 * Configure the channel LL and data blocks if number of 349 341 * channels enabled in VSEC capability are more than the 350 342 * channels configured in xilinx_mdb_data. 
351 343 */ 352 - dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0, 353 - DW_PCIE_XILINX_MDB_LL_OFF_GAP, 354 - DW_PCIE_XILINX_MDB_LL_SIZE, 355 - DW_PCIE_XILINX_MDB_DT_OFF_GAP, 356 - DW_PCIE_XILINX_MDB_DT_SIZE); 344 + if (!non_ll) 345 + dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0, 346 + DW_PCIE_XILINX_MDB_LL_OFF_GAP, 347 + DW_PCIE_XILINX_MDB_LL_SIZE, 348 + DW_PCIE_XILINX_MDB_DT_OFF_GAP, 349 + DW_PCIE_XILINX_MDB_DT_SIZE); 357 350 } 358 351 359 352 /* Mapping PCI BAR regions */ ··· 404 391 chip->mf = vsec_data->mf; 405 392 chip->nr_irqs = nr_irqs; 406 393 chip->ops = &dw_edma_pcie_plat_ops; 394 + chip->cfg_non_ll = non_ll; 407 395 408 396 chip->ll_wr_cnt = vsec_data->wr_ch_cnt; 409 397 chip->ll_rd_cnt = vsec_data->rd_ch_cnt; ··· 413 399 if (!chip->reg_base) 414 400 return -ENOMEM; 415 401 416 - for (i = 0; i < chip->ll_wr_cnt; i++) { 402 + for (i = 0; i < chip->ll_wr_cnt && !non_ll; i++) { 417 403 struct dw_edma_region *ll_region = &chip->ll_region_wr[i]; 418 404 struct dw_edma_region *dt_region = &chip->dt_region_wr[i]; 419 405 struct dw_edma_block *ll_block = &vsec_data->ll_wr[i]; ··· 424 410 return -ENOMEM; 425 411 426 412 ll_region->vaddr.io += ll_block->off; 427 - ll_region->paddr = pci_bus_address(pdev, ll_block->bar); 413 + ll_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data, 414 + ll_block->bar); 428 415 ll_region->paddr += ll_block->off; 429 416 ll_region->sz = ll_block->sz; 430 417 ··· 434 419 return -ENOMEM; 435 420 436 421 dt_region->vaddr.io += dt_block->off; 437 - dt_region->paddr = pci_bus_address(pdev, dt_block->bar); 422 + dt_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data, 423 + dt_block->bar); 438 424 dt_region->paddr += dt_block->off; 439 425 dt_region->sz = dt_block->sz; 440 426 } 441 427 442 - for (i = 0; i < chip->ll_rd_cnt; i++) { 428 + for (i = 0; i < chip->ll_rd_cnt && !non_ll; i++) { 443 429 struct dw_edma_region *ll_region = &chip->ll_region_rd[i]; 444 430 struct dw_edma_region *dt_region = &chip->dt_region_rd[i]; 445 431 
struct dw_edma_block *ll_block = &vsec_data->ll_rd[i]; ··· 451 435 return -ENOMEM; 452 436 453 437 ll_region->vaddr.io += ll_block->off; 454 - ll_region->paddr = pci_bus_address(pdev, ll_block->bar); 438 + ll_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data, 439 + ll_block->bar); 455 440 ll_region->paddr += ll_block->off; 456 441 ll_region->sz = ll_block->sz; 457 442 ··· 461 444 return -ENOMEM; 462 445 463 446 dt_region->vaddr.io += dt_block->off; 464 - dt_region->paddr = pci_bus_address(pdev, dt_block->bar); 447 + dt_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data, 448 + dt_block->bar); 465 449 dt_region->paddr += dt_block->off; 466 450 dt_region->sz = dt_block->sz; 467 451 }
+63 -1
drivers/dma/dw-edma/dw-hdma-v0-core.c
··· 225 225 readl(chunk->ll_region.vaddr.io); 226 226 } 227 227 228 - static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first) 228 + static void dw_hdma_v0_core_ll_start(struct dw_edma_chunk *chunk, bool first) 229 229 { 230 230 struct dw_edma_chan *chan = chunk->chan; 231 231 struct dw_edma *dw = chan->dw; ··· 261 261 262 262 /* Doorbell */ 263 263 SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START); 264 + } 265 + 266 + static void dw_hdma_v0_core_non_ll_start(struct dw_edma_chunk *chunk) 267 + { 268 + struct dw_edma_chan *chan = chunk->chan; 269 + struct dw_edma *dw = chan->dw; 270 + struct dw_edma_burst *child; 271 + u32 val; 272 + 273 + child = list_first_entry_or_null(&chunk->burst->list, 274 + struct dw_edma_burst, list); 275 + if (!child) 276 + return; 277 + 278 + SET_CH_32(dw, chan->dir, chan->id, ch_en, HDMA_V0_CH_EN); 279 + 280 + /* Source address */ 281 + SET_CH_32(dw, chan->dir, chan->id, sar.lsb, 282 + lower_32_bits(child->sar)); 283 + SET_CH_32(dw, chan->dir, chan->id, sar.msb, 284 + upper_32_bits(child->sar)); 285 + 286 + /* Destination address */ 287 + SET_CH_32(dw, chan->dir, chan->id, dar.lsb, 288 + lower_32_bits(child->dar)); 289 + SET_CH_32(dw, chan->dir, chan->id, dar.msb, 290 + upper_32_bits(child->dar)); 291 + 292 + /* Transfer size */ 293 + SET_CH_32(dw, chan->dir, chan->id, transfer_size, child->sz); 294 + 295 + /* Interrupt setup */ 296 + val = GET_CH_32(dw, chan->dir, chan->id, int_setup) | 297 + HDMA_V0_STOP_INT_MASK | 298 + HDMA_V0_ABORT_INT_MASK | 299 + HDMA_V0_LOCAL_STOP_INT_EN | 300 + HDMA_V0_LOCAL_ABORT_INT_EN; 301 + 302 + if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL)) { 303 + val |= HDMA_V0_REMOTE_STOP_INT_EN | 304 + HDMA_V0_REMOTE_ABORT_INT_EN; 305 + } 306 + 307 + SET_CH_32(dw, chan->dir, chan->id, int_setup, val); 308 + 309 + /* Channel control setup */ 310 + val = GET_CH_32(dw, chan->dir, chan->id, control1); 311 + val &= ~HDMA_V0_LINKLIST_EN; 312 + SET_CH_32(dw, chan->dir, chan->id, control1, 
val); 313 + 314 + SET_CH_32(dw, chan->dir, chan->id, doorbell, 315 + HDMA_V0_DOORBELL_START); 316 + } 317 + 318 + static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first) 319 + { 320 + struct dw_edma_chan *chan = chunk->chan; 321 + 322 + if (chan->non_ll) 323 + dw_hdma_v0_core_non_ll_start(chunk); 324 + else 325 + dw_hdma_v0_core_ll_start(chunk, first); 264 326 } 265 327 266 328 static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
+1
drivers/dma/dw-edma/dw-hdma-v0-regs.h
··· 12 12 #include <linux/dmaengine.h> 13 13 14 14 #define HDMA_V0_MAX_NR_CH 8 15 + #define HDMA_V0_CH_EN BIT(0) 15 16 #define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6) 16 17 #define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5) 17 18 #define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
+1
include/linux/dma/edma.h
··· 103 103 enum dw_edma_map_format mf; 104 104 105 105 struct dw_edma *dw; 106 + bool cfg_non_ll; 106 107 }; 107 108 108 109 /* Export to the platform drivers */