Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'dmaengine-fix-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
"This has a bunch of idxd driver fixes, dmatest revert and bunch of
smaller driver fixes:

- a bunch of idxd potential mem leak fixes

- dmatest revert for waiting for interrupt fix as that causes issue

- a couple of ti k3 udma fixes for locking and cap_mask

- mediatek deadlock fix and unused variable cleanup fix"

* tag 'dmaengine-fix-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
dmaengine: mediatek: drop unused variable
dmaengine: fsl-edma: Fix return code for unhandled interrupts
dmaengine: mediatek: Fix a possible deadlock error in mtk_cqdma_tx_status()
dmaengine: idxd: Fix ->poll() return value
dmaengine: idxd: Refactor remove call with idxd_cleanup() helper
dmaengine: idxd: Add missing idxd cleanup to fix memory leak in remove call
dmaengine: idxd: fix memory leak in error handling path of idxd_pci_probe
dmaengine: idxd: fix memory leak in error handling path of idxd_alloc
dmaengine: idxd: Add missing cleanups in cleanup internals
dmaengine: idxd: Add missing cleanup for early error out in idxd_setup_internals
dmaengine: idxd: fix memory leak in error handling path of idxd_setup_groups
dmaengine: idxd: fix memory leak in error handling path of idxd_setup_engines
dmaengine: idxd: fix memory leak in error handling path of idxd_setup_wqs
dmaengine: ptdma: Move variable condition check to the first place and remove redundancy
dmaengine: idxd: Fix allowing write() from different address spaces
dmaengine: ti: k3-udma: Add missing locking
dmaengine: ti: k3-udma: Use cap_mask directly from dma_device structure instead of a local copy
dmaengine: Revert "dmaengine: dmatest: Fix dmatest waiting less when interrupted"
dmaengine: idxd: cdev: Fix uninitialized use of sva in idxd_cdev_open

+148 -67
+10 -9
drivers/dma/amd/ptdma/ptdma-dmaengine.c
···
 	struct pt_dma_chan *chan;
 	unsigned long flags;
 
+	if (!desc)
+		return;
+
 	dma_chan = desc->vd.tx.chan;
 	chan = to_pt_chan(dma_chan);
···
 	desc->status = DMA_ERROR;
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	if (desc) {
-		if (desc->status != DMA_COMPLETE) {
-			if (desc->status != DMA_ERROR)
-				desc->status = DMA_COMPLETE;
+	if (desc->status != DMA_COMPLETE) {
+		if (desc->status != DMA_ERROR)
+			desc->status = DMA_COMPLETE;
 
-			dma_cookie_complete(tx_desc);
-			dma_descriptor_unmap(tx_desc);
-		} else {
-			tx_desc = NULL;
-		}
+		dma_cookie_complete(tx_desc);
+		dma_descriptor_unmap(tx_desc);
+	} else {
+		tx_desc = NULL;
 	}
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
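
Note on the fix above: the removed check tested desc only after desc->vd.tx.chan had already been dereferenced, so it could never catch a NULL descriptor. A minimal sketch of the restored pattern, under hypothetical names (example_desc, example_push are illustrative, not driver code):

struct example_chan;

struct example_desc {
	struct example_chan *chan;
};

static void example_push(struct example_chan *chan)
{
	/* the real driver would submit to hardware here */
}

static void example_issue(struct example_desc *desc)
{
	/* Bail out before the first dereference; a NULL test placed
	 * after the pointer has been used cannot prevent the crash. */
	if (!desc)
		return;

	example_push(desc->chan);	/* first (now safe) dereference */
}
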
+3 -3
drivers/dma/dmatest.c
···
 	} else {
 		dma_async_issue_pending(chan);
 
-		wait_event_timeout(thread->done_wait,
-				   done->done,
-				   msecs_to_jiffies(params->timeout));
+		wait_event_freezable_timeout(thread->done_wait,
+					     done->done,
+					     msecs_to_jiffies(params->timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL,
 						  NULL);
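
Note on the revert above: wait_event_freezable_timeout() behaves like wait_event_timeout(), but lets the freezer park the thread during suspend/hibernate instead of the freeze cutting the wait short. A hedged sketch of typical usage, with hypothetical names (example_wq, example_done):

#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* woken by a completion callback */
static bool example_done;

static void example_wait(unsigned int timeout_ms)
{
	/* Returns 0 on timeout, nonzero if example_done became true in time. */
	if (!wait_event_freezable_timeout(example_wq, example_done,
					  msecs_to_jiffies(timeout_ms)))
		pr_warn("example: wait timed out\n");
}
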
+1 -1
drivers/dma/fsl-edma-main.c
···
 	intr = edma_readl_chreg(fsl_chan, ch_int);
 	if (!intr)
-		return IRQ_HANDLED;
+		return IRQ_NONE;
 
 	edma_writel_chreg(fsl_chan, 1, ch_int);
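
Note on the fix above: a handler on a shared interrupt line must return IRQ_NONE when its device did not raise the interrupt, so the core's spurious-IRQ accounting keeps working; IRQ_HANDLED claims the interrupt as serviced. A minimal sketch of the convention, with hypothetical device and register names:

#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_INT_STATUS	0x10	/* hypothetical status register */

struct example_dev {
	void __iomem *regs;		/* hypothetical MMIO base */
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;
	u32 status = readl(ed->regs + EXAMPLE_INT_STATUS);

	if (!status)
		return IRQ_NONE;	/* not ours; let the core count it */

	writel(status, ed->regs + EXAMPLE_INT_STATUS);	/* ack what we saw */
	return IRQ_HANDLED;
}
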
+11 -2
drivers/dma/idxd/cdev.c
···
 	struct idxd_wq *wq;
 	struct device *dev, *fdev;
 	int rc = 0;
-	struct iommu_sva *sva;
+	struct iommu_sva *sva = NULL;
 	unsigned int pasid;
 	struct idxd_cdev *idxd_cdev;
···
 	if (device_user_pasid_enabled(idxd))
 		idxd_xa_pasid_remove(ctx);
 failed_get_pasid:
-	if (device_user_pasid_enabled(idxd))
+	if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
 		iommu_sva_unbind_device(sva);
 failed:
 	mutex_unlock(&wq->wq_lock);
···
 	if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
+	if (current->mm != ctx->mm)
+		return -EPERM;
+
 	rc = check_vma(wq, vma, __func__);
 	if (rc < 0)
 		return rc;
···
 	ssize_t written = 0;
 	int i;
 
+	if (current->mm != ctx->mm)
+		return -EPERM;
+
 	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
 		int rc = idxd_submit_user_descriptor(ctx, udesc + i);
···
 	struct idxd_wq *wq = ctx->wq;
 	struct idxd_device *idxd = wq->idxd;
 	__poll_t out = 0;
+
+	if (current->mm != ctx->mm)
+		return POLLNVAL;
 
 	poll_wait(filp, &wq->err_queue, wait);
 	spin_lock(&idxd->dev_lock);
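
Note on the fixes above: two defensive patterns. Initializing sva to NULL and gating the unbind on !IS_ERR_OR_NULL() keeps the shared error path from unbinding a handle that was never obtained, and the current->mm checks reject file operations arriving from a different address space than the opener's (a descriptor can cross processes via fork() or SCM_RIGHTS). A sketch of the ownership check with a hypothetical context struct:

#include <linux/errno.h>
#include <linux/sched.h>

struct example_ctx {
	struct mm_struct *mm;	/* address space captured at open() */
};

static int example_check_caller(const struct example_ctx *ctx)
{
	/* Work submitted through this fd is bound to the opener's
	 * PASID/address space; any other mm must not use it. */
	if (current->mm != ctx->mm)
		return -EPERM;
	return 0;
}
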
+113 -46
drivers/dma/idxd/init.c
···
 	pci_free_irq_vectors(pdev);
 }
 
+static void idxd_clean_wqs(struct idxd_device *idxd)
+{
+	struct idxd_wq *wq;
+	struct device *conf_dev;
+	int i;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		wq = idxd->wqs[i];
+		if (idxd->hw.wq_cap.op_config)
+			bitmap_free(wq->opcap_bmap);
+		kfree(wq->wqcfg);
+		conf_dev = wq_confdev(wq);
+		put_device(conf_dev);
+		kfree(wq);
+	}
+	bitmap_free(idxd->wq_enable_map);
+	kfree(idxd->wqs);
+}
+
 static int idxd_setup_wqs(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
···
 	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
 	if (!idxd->wq_enable_map) {
-		kfree(idxd->wqs);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_bitmap;
 	}
 
 	for (i = 0; i < idxd->max_wqs; i++) {
···
 		conf_dev->bus = &dsa_bus_type;
 		conf_dev->type = &idxd_wq_device_type;
 		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
-		if (rc < 0) {
-			put_device(conf_dev);
+		if (rc < 0)
 			goto err;
-		}
 
 		mutex_init(&wq->wq_lock);
 		init_waitqueue_head(&wq->err_queue);
···
 		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
 		if (!wq->wqcfg) {
-			put_device(conf_dev);
 			rc = -ENOMEM;
 			goto err;
 		}
···
 		if (idxd->hw.wq_cap.op_config) {
 			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
 			if (!wq->opcap_bmap) {
-				put_device(conf_dev);
 				rc = -ENOMEM;
-				goto err;
+				goto err_opcap_bmap;
 			}
 			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
 		}
···
 
 	return 0;
 
-err:
+err_opcap_bmap:
+	kfree(wq->wqcfg);
+
+err:
+	put_device(conf_dev);
+	kfree(wq);
+
 	while (--i >= 0) {
 		wq = idxd->wqs[i];
+		if (idxd->hw.wq_cap.op_config)
+			bitmap_free(wq->opcap_bmap);
+		kfree(wq->wqcfg);
 		conf_dev = wq_confdev(wq);
 		put_device(conf_dev);
+		kfree(wq);
+
 	}
+	bitmap_free(idxd->wq_enable_map);
+
+err_bitmap:
+	kfree(idxd->wqs);
+
 	return rc;
 }
 
+static void idxd_clean_engines(struct idxd_device *idxd)
+{
+	struct idxd_engine *engine;
+	struct device *conf_dev;
+	int i;
+
+	for (i = 0; i < idxd->max_engines; i++) {
+		engine = idxd->engines[i];
+		conf_dev = engine_confdev(engine);
+		put_device(conf_dev);
+		kfree(engine);
+	}
+	kfree(idxd->engines);
+}
+
 static int idxd_setup_engines(struct idxd_device *idxd)
···
 		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
 		if (rc < 0) {
 			put_device(conf_dev);
+			kfree(engine);
 			goto err;
 		}
···
 		engine = idxd->engines[i];
 		conf_dev = engine_confdev(engine);
 		put_device(conf_dev);
+		kfree(engine);
 	}
+	kfree(idxd->engines);
+
 	return rc;
 }
 
+static void idxd_clean_groups(struct idxd_device *idxd)
+{
+	struct idxd_group *group;
+	int i;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		group = idxd->groups[i];
+		put_device(group_confdev(group));
+		kfree(group);
+	}
+	kfree(idxd->groups);
+}
+
 static int idxd_setup_groups(struct idxd_device *idxd)
···
 		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
 		if (rc < 0) {
 			put_device(conf_dev);
+			kfree(group);
 			goto err;
 		}
···
 	while (--i >= 0) {
 		group = idxd->groups[i];
 		put_device(group_confdev(group));
+		kfree(group);
 	}
+	kfree(idxd->groups);
+
 	return rc;
 }
 
 static void idxd_cleanup_internals(struct idxd_device *idxd)
 {
-	int i;
-
-	for (i = 0; i < idxd->max_groups; i++)
-		put_device(group_confdev(idxd->groups[i]));
-	for (i = 0; i < idxd->max_engines; i++)
-		put_device(engine_confdev(idxd->engines[i]));
-	for (i = 0; i < idxd->max_wqs; i++)
-		put_device(wq_confdev(idxd->wqs[i]));
+	idxd_clean_groups(idxd);
+	idxd_clean_engines(idxd);
+	idxd_clean_wqs(idxd);
 	destroy_workqueue(idxd->wq);
 }
···
 static int idxd_setup_internals(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
-	int rc, i;
+	int rc;
 
 	init_waitqueue_head(&idxd->cmd_waitq);
···
 err_evl:
 	destroy_workqueue(idxd->wq);
 err_wkq_create:
-	for (i = 0; i < idxd->max_groups; i++)
-		put_device(group_confdev(idxd->groups[i]));
+	idxd_clean_groups(idxd);
 err_group:
-	for (i = 0; i < idxd->max_engines; i++)
-		put_device(engine_confdev(idxd->engines[i]));
+	idxd_clean_engines(idxd);
 err_engine:
-	for (i = 0; i < idxd->max_wqs; i++)
-		put_device(wq_confdev(idxd->wqs[i]));
+	idxd_clean_wqs(idxd);
 err_wqs:
 	return rc;
 }
···
 		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
 }
 
+static void idxd_free(struct idxd_device *idxd)
+{
+	if (!idxd)
+		return;
+
+	put_device(idxd_confdev(idxd));
+	bitmap_free(idxd->opcap_bmap);
+	ida_free(&idxd_ida, idxd->id);
+	kfree(idxd);
+}
+
 static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
 {
 	struct device *dev = &pdev->dev;
···
 	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
 	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
 	if (idxd->id < 0)
-		return NULL;
+		goto err_ida;
 
 	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
-	if (!idxd->opcap_bmap) {
-		ida_free(&idxd_ida, idxd->id);
-		return NULL;
-	}
+	if (!idxd->opcap_bmap)
+		goto err_opcap;
 
 	device_initialize(conf_dev);
 	conf_dev->parent = dev;
 	conf_dev->bus = &dsa_bus_type;
 	conf_dev->type = idxd->data->dev_type;
 	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
-	if (rc < 0) {
-		put_device(conf_dev);
-		return NULL;
-	}
+	if (rc < 0)
+		goto err_name;
 
 	spin_lock_init(&idxd->dev_lock);
 	spin_lock_init(&idxd->cmd_lock);
 
 	return idxd;
+
+err_name:
+	put_device(conf_dev);
+	bitmap_free(idxd->opcap_bmap);
+err_opcap:
+	ida_free(&idxd_ida, idxd->id);
+err_ida:
+	kfree(idxd);
+
+	return NULL;
 }
 
 static int idxd_enable_system_pasid(struct idxd_device *idxd)
···
 err:
 	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
-	put_device(idxd_confdev(idxd));
+	idxd_free(idxd);
 err_idxd_alloc:
 	pci_disable_device(pdev);
 	return rc;
···
 static void idxd_remove(struct pci_dev *pdev)
 {
 	struct idxd_device *idxd = pci_get_drvdata(pdev);
-	struct idxd_irq_entry *irq_entry;
 
 	idxd_unregister_devices(idxd);
 	/*
···
 	get_device(idxd_confdev(idxd));
 	device_unregister(idxd_confdev(idxd));
 	idxd_shutdown(pdev);
-	if (device_pasid_enabled(idxd))
-		idxd_disable_system_pasid(idxd);
 	idxd_device_remove_debugfs(idxd);
-
-	irq_entry = idxd_get_ie(idxd, 0);
-	free_irq(irq_entry->vector, irq_entry);
-	pci_free_irq_vectors(pdev);
+	idxd_cleanup(idxd);
 	pci_iounmap(pdev, idxd->reg_base);
-	if (device_user_pasid_enabled(idxd))
-		idxd_disable_sva(pdev);
-	pci_disable_device(pdev);
-	destroy_workqueue(idxd->wq);
-	perfmon_pmu_remove(idxd);
 	put_device(idxd_confdev(idxd));
+	idxd_free(idxd);
+	pci_disable_device(pdev);
 }
 
 static struct pci_driver idxd_pci_driver = {
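
Note on the idxd changes above: most of this diff converts ad-hoc error handling into the kernel's goto-unwind idiom. Each allocation gains a matching release, a failure jumps to a label that frees exactly what exists so far (in reverse order of acquisition), and the new idxd_clean_*()/idxd_free() helpers serve the error paths and idxd_remove() alike, so the two can no longer drift apart. A minimal sketch of the idiom, under hypothetical names:

#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>

#define EXAMPLE_BITS	64

static DEFINE_IDA(example_ida);		/* hypothetical ID allocator */

struct example {
	int id;
	unsigned long *bmap;
};

static struct example *example_alloc(void)
{
	struct example *ex = kzalloc(sizeof(*ex), GFP_KERNEL);

	if (!ex)
		return NULL;

	ex->id = ida_alloc(&example_ida, GFP_KERNEL);
	if (ex->id < 0)
		goto err_ida;

	ex->bmap = bitmap_zalloc(EXAMPLE_BITS, GFP_KERNEL);
	if (!ex->bmap)
		goto err_bitmap;

	return ex;

err_bitmap:
	ida_free(&example_ida, ex->id);	/* unwind in reverse order */
err_ida:
	kfree(ex);
	return NULL;
}
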
+2 -4
drivers/dma/mediatek/mtk-cqdma.c
···
 {
 	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
 	struct virt_dma_desc *vd;
-	unsigned long flags;
 
-	spin_lock_irqsave(&cvc->pc->lock, flags);
 	list_for_each_entry(vd, &cvc->pc->queue, node)
 		if (vd->tx.cookie == cookie) {
-			spin_unlock_irqrestore(&cvc->pc->lock, flags);
 			return vd;
 		}
-	spin_unlock_irqrestore(&cvc->pc->lock, flags);
 
 	list_for_each_entry(vd, &cvc->vc.desc_issued, node)
 		if (vd->tx.cookie == cookie)
···
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
+	spin_lock_irqsave(&cvc->pc->lock, flags);
 	spin_lock_irqsave(&cvc->vc.lock, flags);
 	vd = mtk_cqdma_find_active_desc(c, cookie);
 	spin_unlock_irqrestore(&cvc->vc.lock, flags);
+	spin_unlock_irqrestore(&cvc->pc->lock, flags);
 
 	if (vd) {
 		cvd = to_cqdma_vdesc(vd);
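
Note on the fix above: mtk_cqdma_find_active_desc() used to take pc->lock itself while its caller already held vc.lock, so two code paths could acquire the same pair of locks in opposite orders (a classic ABBA deadlock). The fix hoists pc->lock into mtk_cqdma_tx_status() so both locks are taken at one call site in one fixed order. A minimal sketch of the principle, with hypothetical locks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);		/* e.g. the physical-channel lock */
static DEFINE_SPINLOCK(lock_b);		/* e.g. the virtual-channel lock */

static void example_lookup(void)
{
	/* Runs with both locks held; takes none itself. */
}

static void example_tx_status(void)
{
	unsigned long flags;

	spin_lock_irqsave(&lock_a, flags);	/* always lock_a first... */
	spin_lock(&lock_b);			/* ...then lock_b */
	example_lookup();
	spin_unlock(&lock_b);
	spin_unlock_irqrestore(&lock_a, flags);
}

The sketch uses plain spin_lock() for the inner lock: interrupts are already off at that point, and reusing one flags variable across two spin_lock_irqsave() calls would clobber the saved state.
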
+8 -2
drivers/dma/ti/k3-udma.c
···
 	u32 residue_diff;
 	ktime_t time_diff;
 	unsigned long delay;
+	unsigned long flags;
 
 	while (1) {
+		spin_lock_irqsave(&uc->vc.lock, flags);
+
 		if (uc->desc) {
 			/* Get previous residue and time stamp */
 			residue_diff = uc->tx_drain.residue;
···
 			break;
 		}
 
+		spin_unlock_irqrestore(&uc->vc.lock, flags);
+
 		usleep_range(ktime_to_us(delay),
 			     ktime_to_us(delay) + 10);
 		continue;
···
 
 		break;
 	}
+
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
 }
 
 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
···
 				      struct of_dma *ofdma)
 {
 	struct udma_dev *ud = ofdma->of_dma_data;
-	dma_cap_mask_t mask = ud->ddev.cap_mask;
 	struct udma_filter_param filter_param;
 	struct dma_chan *chan;
···
 	}
 
-	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+	chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
 				     ofdma->of_node);
 	if (!chan) {
 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
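
Note on the fixes above: the drain-polling loop now takes uc->vc.lock around each inspection of uc->desc and the drain state, and drops it before usleep_range(), since sleeping with a spinlock held is forbidden. The second change passes &ud->ddev.cap_mask straight to __dma_request_channel() instead of going through a stack copy of the dma_cap_mask_t. A minimal sketch of the polling pattern, with hypothetical names:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static bool example_busy;	/* state also written from IRQ context */

static void example_wait_idle(void)
{
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&example_lock, flags);
		if (!example_busy) {
			spin_unlock_irqrestore(&example_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&example_lock, flags);

		/* Sleep only after dropping the lock. */
		usleep_range(100, 200);
	}
}
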