Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/msm/mdp5: drop single flush support

Support for using a single CTL for flushing both interfaces was not in
use since the MDP5 driver dropped support for dual DSI configurations in
the commit df3c7899946c ("drm/msm/mdp5: drop split display support").
With the MDP 3.x support having been migrated to the DPU driver, the
single CTL flush is no longer applicable to any of the platforms
supported by the MDP5 driver. Drop it altogether.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
Patchwork: https://patchwork.freedesktop.org/patch/713916/
Link: https://lore.kernel.org/r/20260325-mdp5-drop-single-flush-v1-1-862a38b4d2ec@oss.qualcomm.com

-91
-90
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
··· 17 17 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...) 18 18 * 19 19 * Hardware capabilities determine the number of concurrent data paths 20 - * 21 - * In certain use cases (high-resolution dual pipe), one single CTL can be 22 - * shared across multiple CRTCs. 23 20 */ 24 21 25 22 #define CTL_STAT_BUSY 0x1 ··· 43 46 u32 pending_ctl_trigger; 44 47 45 48 bool cursor_on; 46 - 47 - /* True if the current CTL has FLUSH bits pending for single FLUSH. */ 48 - bool flush_pending; 49 - 50 - struct mdp5_ctl *pair; /* Paired CTL to be flushed together */ 51 49 }; 52 50 53 51 struct mdp5_ctl_manager { ··· 54 62 55 63 /* to filter out non-present bits in the current hardware config */ 56 64 u32 flush_hw_mask; 57 - 58 - /* status for single FLUSH */ 59 - bool single_flush_supported; 60 - u32 single_flush_pending_mask; 61 65 62 66 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ 63 67 spinlock_t pool_lock; ··· 473 485 return sw_mask; 474 486 } 475 487 476 - static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, 477 - u32 *flush_id) 478 - { 479 - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 480 - 481 - if (ctl->pair) { 482 - DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); 483 - ctl->flush_pending = true; 484 - ctl_mgr->single_flush_pending_mask |= (*flush_mask); 485 - *flush_mask = 0; 486 - 487 - if (ctl->pair->flush_pending) { 488 - *flush_id = min_t(u32, ctl->id, ctl->pair->id); 489 - *flush_mask = ctl_mgr->single_flush_pending_mask; 490 - 491 - ctl->flush_pending = false; 492 - ctl->pair->flush_pending = false; 493 - ctl_mgr->single_flush_pending_mask = 0; 494 - 495 - DBG("Single FLUSH mask %x,ID %d", *flush_mask, 496 - *flush_id); 497 - } 498 - } 499 - } 500 - 501 488 /** 502 489 * mdp5_ctl_commit() - Register Flush 503 490 * ··· 518 555 519 556 curr_ctl_flush_mask = flush_mask; 520 557 521 - fix_for_single_flush(ctl, &flush_mask, &flush_id); 522 - 523 558 if (!start) { 524 559 ctl->flush_mask |= flush_mask; 525 560 
return curr_ctl_flush_mask; ··· 547 586 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) 548 587 { 549 588 return WARN_ON(!ctl) ? -EINVAL : ctl->id; 550 - } 551 - 552 - /* 553 - * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH 554 - */ 555 - int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) 556 - { 557 - struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; 558 - struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); 559 - 560 - /* do nothing silently if hw doesn't support */ 561 - if (!ctl_mgr->single_flush_supported) 562 - return 0; 563 - 564 - if (!enable) { 565 - ctlx->pair = NULL; 566 - ctly->pair = NULL; 567 - mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0); 568 - return 0; 569 - } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { 570 - DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n"); 571 - return -EINVAL; 572 - } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) { 573 - DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); 574 - return -EINVAL; 575 - } 576 - 577 - ctlx->pair = ctly; 578 - ctly->pair = ctlx; 579 - 580 - mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 581 - MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); 582 - 583 - return 0; 584 589 } 585 590 586 591 /* ··· 614 687 { 615 688 struct mdp5_ctl_manager *ctl_mgr; 616 689 const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); 617 - int rev = mdp5_cfg_get_hw_rev(cfg_hnd); 618 - unsigned dsi_cnt = 0; 619 690 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; 620 691 unsigned long flags; 621 692 int c, ret; ··· 655 730 spin_lock_init(&ctl->hw_lock); 656 731 } 657 732 658 - /* 659 - * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI 660 - * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when 661 - * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. 662 - * Single FLUSH is supported from hw rev v3.0. 
663 - */ 664 - for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++) 665 - if (hw_cfg->intf.connect[c] == INTF_DSI) 666 - dsi_cnt++; 667 - if ((rev >= 3) && (dsi_cnt > 1)) { 668 - ctl_mgr->single_flush_supported = true; 669 - /* Reserve CTL0/1 for INTF1/2 */ 670 - ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; 671 - ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; 672 - } 673 733 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); 674 734 DBG("Pool of %d CTLs created.", ctl_mgr->nctl); 675 735
-1
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
··· 35 35 36 36 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, 37 37 int cursor_id, bool enable); 38 - int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); 39 38 40 39 #define MAX_PIPE_STAGE 2 41 40