Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mailbox-fixes-v6.18-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/jassibrar/mailbox

Pull mailbox fixes from Jassi Brar:

- omap: check for pending msgs only when mbox is exclusive

- mailbox-test: debugfs_create_dir error checking

- mtk:
- cmdq: fix DMA address handling
- gpueb: Add missing 'static' to mailbox ops struct

- pcc: don't zero error register

- th1520: fix clock imbalance on probe failure

* tag 'mailbox-fixes-v6.18-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/jassibrar/mailbox:
mailbox: th1520: fix clock imbalance on probe failure
mailbox: pcc: don't zero error register
mailbox: mtk-gpueb: Add missing 'static' to mailbox ops struct
mailbox: mtk-cmdq: Refine DMA address handling for the command buffer
mailbox: mailbox-test: Fix debugfs_create_dir error checking
mailbox: omap-mailbox: Check for pending msgs only when mbox is exclusive

+68 -38
+1 -1
drivers/mailbox/mailbox-test.c
··· 268 268 return 0; 269 269 270 270 tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL); 271 - if (!tdev->root_debugfs_dir) { 271 + if (IS_ERR(tdev->root_debugfs_dir)) { 272 272 dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n"); 273 273 return -EINVAL; 274 274 }
+1 -3
drivers/mailbox/mailbox-th1520.c
··· 435 435 } 436 436 437 437 ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv); 438 - if (ret) { 439 - clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks); 438 + if (ret) 440 439 return ret; 441 - } 442 440 443 441 /* 444 442 * The address mappings in the device tree align precisely with those
+31 -14
drivers/mailbox/mtk-cmdq-mailbox.c
··· 92 92 u32 gce_num; 93 93 }; 94 94 95 + static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata) 96 + { 97 + /* Convert DMA addr (PA or IOVA) to GCE readable addr */ 98 + return addr >> pdata->shift; 99 + } 100 + 101 + static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata) 102 + { 103 + /* Revert GCE readable addr to DMA addr (PA or IOVA) */ 104 + return (dma_addr_t)addr << pdata->shift; 105 + } 106 + 95 107 u8 cmdq_get_shift_pa(struct mbox_chan *chan) 96 108 { 97 109 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); ··· 200 188 struct cmdq_task *prev_task = list_last_entry( 201 189 &thread->task_busy_list, typeof(*task), list_entry); 202 190 u64 *prev_task_base = prev_task->pkt->va_base; 191 + u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata); 203 192 204 193 /* let previous task jump to this task */ 205 194 dma_sync_single_for_cpu(dev, prev_task->pa_base, 206 195 prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); 207 - prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = 208 - (u64)CMDQ_JUMP_BY_PA << 32 | 209 - (task->pa_base >> task->cmdq->pdata->shift); 196 + prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr; 210 197 dma_sync_single_for_device(dev, prev_task->pa_base, 211 198 prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); 212 199 ··· 248 237 struct cmdq_thread *thread) 249 238 { 250 239 struct cmdq_task *task, *tmp, *curr_task = NULL; 251 - u32 curr_pa, irq_flag, task_end_pa; 240 + u32 irq_flag, gce_addr; 241 + dma_addr_t curr_pa, task_end_pa; 252 242 bool err; 253 243 254 244 irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS); ··· 271 259 else 272 260 return; 273 261 274 - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift; 262 + gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR); 263 + curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); 275 264 276 265 list_for_each_entry_safe(task, tmp, &thread->task_busy_list, 277 266 list_entry) { ··· 391 378 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; 392 379 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); 393 380 struct cmdq_task *task; 394 - unsigned long curr_pa, end_pa; 381 + u32 gce_addr; 382 + dma_addr_t curr_pa, end_pa; 395 383 396 384 /* Client should not flush new tasks if suspended. */ 397 385 WARN_ON(cmdq->suspended); ··· 416 402 */ 417 403 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0); 418 404 419 - writel(task->pa_base >> cmdq->pdata->shift, 420 - thread->base + CMDQ_THR_CURR_ADDR); 421 - writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift, 422 - thread->base + CMDQ_THR_END_ADDR); 405 + gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata); 406 + writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR); 407 + gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata); 408 + writel(gce_addr, thread->base + CMDQ_THR_END_ADDR); 423 409 424 410 writel(thread->priority, thread->base + CMDQ_THR_PRIORITY); 425 411 writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE); 426 412 writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK); 427 413 } else { 428 414 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); 429 - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << 430 - cmdq->pdata->shift; 431 - end_pa = readl(thread->base + CMDQ_THR_END_ADDR) << 432 - cmdq->pdata->shift; 415 + gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR); 416 + curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); 417 + gce_addr = readl(thread->base + CMDQ_THR_END_ADDR); 418 + end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); 433 419 /* check boundary */ 434 420 if (curr_pa == end_pa - CMDQ_INST_SIZE || 435 421 curr_pa == end_pa) { ··· 659 645 err = cmdq_get_clocks(dev, cmdq); 660 646 if (err) 661 647 return err; 648 + 649 + dma_set_coherent_mask(dev, 650 + DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift)); 662 651 663 652 cmdq->mbox.dev = dev; 664 653 cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
+1 -1
drivers/mailbox/mtk-gpueb-mailbox.c
··· 200 200 return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num)); 201 201 } 202 202 203 - const struct mbox_chan_ops mtk_gpueb_mbox_ops = { 203 + static const struct mbox_chan_ops mtk_gpueb_mbox_ops = { 204 204 .send_data = mtk_gpueb_mbox_send_data, 205 205 .startup = mtk_gpueb_mbox_startup, 206 206 .shutdown = mtk_gpueb_mbox_shutdown,
+20 -15
drivers/mailbox/omap-mailbox.c
··· 68 68 69 69 struct omap_mbox_match_data { 70 70 u32 intr_type; 71 + bool is_exclusive; 71 72 }; 72 73 73 74 struct omap_mbox_device { ··· 79 78 u32 num_users; 80 79 u32 num_fifos; 81 80 u32 intr_type; 81 + const struct omap_mbox_match_data *mbox_data; 82 82 }; 83 83 84 84 struct omap_mbox { ··· 343 341 if (pm_runtime_status_suspended(dev)) 344 342 return 0; 345 343 346 - for (fifo = 0; fifo < mdev->num_fifos; fifo++) { 347 - if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) { 348 - dev_err(mdev->dev, "fifo %d has unexpected unread messages\n", 349 - fifo); 350 - return -EBUSY; 344 + if (mdev->mbox_data->is_exclusive) { 345 + for (fifo = 0; fifo < mdev->num_fifos; fifo++) { 346 + if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) { 347 + dev_err(mdev->dev, "fifo %d has unexpected unread messages\n", 348 + fifo); 349 + return -EBUSY; 350 + } 351 351 } 352 352 } 353 353 ··· 382 378 SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume) 383 379 }; 384 380 385 - static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 }; 386 - static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 }; 381 + static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1, true }; 382 + static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2, true }; 383 + static const struct omap_mbox_match_data am654_data = { MBOX_INTR_CFG_TYPE2, false }; 387 384 388 385 static const struct of_device_id omap_mailbox_of_match[] = { 389 386 { ··· 401 396 }, 402 397 { 403 398 .compatible = "ti,am654-mailbox", 404 - .data = &omap4_data, 399 + .data = &am654_data, 405 400 }, 406 401 { 407 402 .compatible = "ti,am64-mailbox", 408 - .data = &omap4_data, 403 + .data = &am654_data, 409 404 }, 410 405 { 411 406 /* end */ ··· 454 449 struct omap_mbox_fifo *fifo; 455 450 struct device_node *node = pdev->dev.of_node; 456 451 struct device_node *child; 457 - const struct omap_mbox_match_data *match_data; 458 452 struct mbox_controller *controller; 459 453 u32 intr_type, info_count; 460 454 u32 num_users, num_fifos; ··· 465 461 pr_err("%s: only DT-based devices are supported\n", __func__); 466 462 return -ENODEV; 467 463 } 468 - 469 - match_data = of_device_get_match_data(&pdev->dev); 470 - if (!match_data) 471 - return -ENODEV; 472 - intr_type = match_data->intr_type; 473 464 474 465 if (of_property_read_u32(node, "ti,mbox-num-users", &num_users)) 475 466 return -ENODEV; ··· 481 482 mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL); 482 483 if (!mdev) 483 484 return -ENOMEM; 485 + 486 + mdev->mbox_data = device_get_match_data(&pdev->dev); 487 + if (!mdev->mbox_data) 488 + return -ENODEV; 489 + 490 + intr_type = mdev->mbox_data->intr_type; 484 491 485 492 mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0); 486 493 if (IS_ERR(mdev->mbox_base))
+4 -4
drivers/mailbox/pcc.c
··· 276 276 if (ret) 277 277 return ret; 278 278 279 - val &= pchan->error.status_mask; 280 - if (val) { 281 - val &= ~pchan->error.status_mask; 279 + if (val & pchan->error.status_mask) { 280 + val &= pchan->error.preserve_mask; 282 281 pcc_chan_reg_write(&pchan->error, val); 283 282 return -EIO; 284 283 } ··· 744 745 745 746 ret = pcc_chan_reg_init(&pchan->error, 746 747 &pcct_ext->error_status_register, 747 - 0, 0, pcct_ext->error_status_mask, 748 + ~pcct_ext->error_status_mask, 0, 749 + pcct_ext->error_status_mask, 748 750 "Error Status"); 749 751 } 750 752 return ret;
+10
include/linux/mailbox/mtk-cmdq-mailbox.h
··· 77 77 size_t buf_size; /* real buffer size */ 78 78 }; 79 79 80 + /** 81 + * cmdq_get_shift_pa() - get the shift bits of physical address 82 + * @chan: mailbox channel 83 + * 84 + * GCE can only fetch the command buffer address from a 32-bit register. 85 + * Some SOCs support more than 32-bit command buffer address for GCE, which 86 + * requires some shift bits to make the address fit into the 32-bit register. 87 + * 88 + * Return: the shift bits of physical address 89 + */ 80 90 u8 cmdq_get_shift_pa(struct mbox_chan *chan); 81 91 82 92 #endif /* __MTK_CMDQ_MAILBOX_H__ */