Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mtk-soc-for-v6.20' of https://git.kernel.org/pub/scm/linux/kernel/git/mediatek/linux into soc/drivers

MediaTek SoC driver updates

This adds:
- A socinfo entry for the MT8371 Genio 520 SoC.
- Support for the Dynamic Voltage and Frequency Scaling
Resource Controller (DVFSRC) version 4, found in the
new MediaTek Kompanio Ultra (MT8196) SoC.
- Initial support for the CMDQ mailbox found in the MT8196.
- A memory leak fix in the MediaTek SVS driver's debug ops.

* tag 'mtk-soc-for-v6.20' of https://git.kernel.org/pub/scm/linux/kernel/git/mediatek/linux:
soc: mediatek: mtk-cmdq: Add mminfra_offset adjustment for DRAM addresses
soc: mediatek: mtk-cmdq: Extend cmdq_pkt_write API for SoCs without subsys ID
soc: mediatek: mtk-cmdq: Add pa_base parsing for hardware without subsys ID support
soc: mediatek: mtk-cmdq: Add cmdq_get_mbox_priv() in cmdq_pkt_create()
mailbox: mtk-cmdq: Add driver data to support for MT8196
mailbox: mtk-cmdq: Add mminfra_offset configuration for DRAM transaction
mailbox: mtk-cmdq: Add GCE hardware virtualization configuration
mailbox: mtk-cmdq: Add cmdq private data to cmdq_pkt for generating instruction
soc: mediatek: mtk-dvfsrc: Rework bandwidth calculations
soc: mediatek: mtk-dvfsrc: Get and Enable DVFSRC clock
soc: mediatek: mtk-dvfsrc: Add support for DVFSRCv4 and MT8196
soc: mediatek: mtk-dvfsrc: Write bandwidth to EMI DDR if present
soc: mediatek: mtk-dvfsrc: Add a new callback for calc_dram_bw
soc: mediatek: mtk-dvfsrc: Add and propagate DVFSRC bandwidth type
soc: mediatek: mtk-dvfsrc: Change error check for DVFSRCv4 START cmd
dt-bindings: soc: mediatek: dvfsrc: Document clock
soc: mediatek: mtk-socinfo: Add entry for MT8371AV/AZA Genio 520
soc: mediatek: svs: Fix memory leak in svs_enable_debug_write()

Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+599 -42
+6
Documentation/devicetree/bindings/soc/mediatek/mediatek,mt8183-dvfsrc.yaml
··· 34 34 maxItems: 1 35 35 description: DVFSRC common register address and length. 36 36 37 + clocks: 38 + items: 39 + - description: Clock that drives the DVFSRC MCU 40 + 37 41 regulators: 38 42 type: object 39 43 $ref: /schemas/regulator/mediatek,mt6873-dvfsrc-regulator.yaml# ··· 54 50 55 51 examples: 56 52 - | 53 + #include <dt-bindings/clock/mt8195-clk.h> 57 54 soc { 58 55 #address-cells = <2>; 59 56 #size-cells = <2>; ··· 62 57 system-controller@10012000 { 63 58 compatible = "mediatek,mt8195-dvfsrc"; 64 59 reg = <0 0x10012000 0 0x1000>; 60 + clocks = <&topckgen CLK_TOP_DVFSRC>; 65 61 66 62 regulators { 67 63 compatible = "mediatek,mt8195-dvfsrc-regulator";
+72 -2
drivers/mailbox/mtk-cmdq-mailbox.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/platform_device.h> 16 16 #include <linux/pm_runtime.h> 17 + #include <linux/sizes.h> 17 18 #include <linux/mailbox_controller.h> 18 19 #include <linux/mailbox/mtk-cmdq-mailbox.h> 19 20 #include <linux/of.h> ··· 43 42 #define GCE_GCTL_VALUE 0x48 44 43 #define GCE_CTRL_BY_SW GENMASK(2, 0) 45 44 #define GCE_DDR_EN GENMASK(18, 16) 45 + 46 + #define GCE_VM_ID_MAP(n) (0x5018 + (n) / 10 * 4) 47 + #define GCE_VM_ID_MAP_THR_FLD_SHIFT(n) ((n) % 10 * 3) 48 + #define GCE_VM_ID_MAP_HOST_VM GENMASK(2, 0) 49 + #define GCE_VM_CPR_GSIZE 0x50c4 50 + #define GCE_VM_CPR_GSIZE_FLD_SHIFT(vm_id) ((vm_id) * 4) 51 + #define GCE_VM_CPR_GSIZE_MAX GENMASK(3, 0) 46 52 47 53 #define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200 48 54 #define CMDQ_THR_ENABLED 0x1 ··· 95 87 struct gce_plat { 96 88 u32 thread_nr; 97 89 u8 shift; 90 + dma_addr_t mminfra_offset; 98 91 bool control_by_sw; 99 92 bool sw_ddr_en; 93 + bool gce_vm; 100 94 u32 gce_num; 101 95 }; 102 96 103 97 static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata) 104 98 { 105 99 /* Convert DMA addr (PA or IOVA) to GCE readable addr */ 106 - return addr >> pdata->shift; 100 + return (addr + pdata->mminfra_offset) >> pdata->shift; 107 101 } 108 102 109 103 static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata) 110 104 { 111 105 /* Revert GCE readable addr to DMA addr (PA or IOVA) */ 112 - return (dma_addr_t)addr << pdata->shift; 106 + return ((dma_addr_t)addr << pdata->shift) - pdata->mminfra_offset; 113 107 } 108 + 109 + void cmdq_get_mbox_priv(struct mbox_chan *chan, struct cmdq_mbox_priv *priv) 110 + { 111 + struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); 112 + 113 + priv->shift_pa = cmdq->pdata->shift; 114 + priv->mminfra_offset = cmdq->pdata->mminfra_offset; 115 + } 116 + EXPORT_SYMBOL(cmdq_get_mbox_priv); 114 117 115 118 u8 cmdq_get_shift_pa(struct mbox_chan *chan) 116 119 { ··· 130 111 return 
cmdq->pdata->shift; 131 112 } 132 113 EXPORT_SYMBOL(cmdq_get_shift_pa); 114 + 115 + static void cmdq_vm_init(struct cmdq *cmdq) 116 + { 117 + int i; 118 + u32 vm_cpr_gsize = 0, vm_id_map = 0; 119 + u32 *vm_map = NULL; 120 + 121 + if (!cmdq->pdata->gce_vm) 122 + return; 123 + 124 + vm_map = kcalloc(cmdq->pdata->thread_nr, sizeof(*vm_map), GFP_KERNEL); 125 + if (!vm_map) 126 + return; 127 + 128 + /* only configure the max CPR SRAM size to host vm (vm_id = 0) currently */ 129 + vm_cpr_gsize = GCE_VM_CPR_GSIZE_MAX << GCE_VM_CPR_GSIZE_FLD_SHIFT(0); 130 + 131 + /* set all thread mapping to host vm currently */ 132 + for (i = 0; i < cmdq->pdata->thread_nr; i++) 133 + vm_map[i] = GCE_VM_ID_MAP_HOST_VM << GCE_VM_ID_MAP_THR_FLD_SHIFT(i); 134 + 135 + /* set the amount of CPR SRAM to allocate to each VM */ 136 + writel(vm_cpr_gsize, cmdq->base + GCE_VM_CPR_GSIZE); 137 + 138 + /* config CPR_GSIZE before setting VM_ID_MAP to avoid data leakage */ 139 + for (i = 0; i < cmdq->pdata->thread_nr; i++) { 140 + vm_id_map |= vm_map[i]; 141 + /* config every 10 threads, e.g., thread id=0~9, 10~19, ..., into one register */ 142 + if ((i + 1) % 10 == 0) { 143 + writel(vm_id_map, cmdq->base + GCE_VM_ID_MAP(i)); 144 + vm_id_map = 0; 145 + } 146 + } 147 + /* config remaining threads settings */ 148 + if (cmdq->pdata->thread_nr % 10 != 0) 149 + writel(vm_id_map, cmdq->base + GCE_VM_ID_MAP(cmdq->pdata->thread_nr - 1)); 150 + 151 + kfree(vm_map); 152 + } 133 153 134 154 static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable) 135 155 { ··· 214 156 215 157 WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); 216 158 159 + cmdq_vm_init(cmdq); 217 160 cmdq_gctl_value_toggle(cmdq, true); 218 161 219 162 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); ··· 841 782 .gce_num = 2 842 783 }; 843 784 785 + static const struct gce_plat gce_plat_mt8196 = { 786 + .thread_nr = 32, 787 + .shift = 3, 788 + .mminfra_offset = SZ_2G, 789 + .control_by_sw = true, 790 + 
.sw_ddr_en = true, 791 + .gce_vm = true, 792 + .gce_num = 2 793 + }; 794 + 844 795 static const struct of_device_id cmdq_of_ids[] = { 845 796 {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779}, 846 797 {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173}, ··· 859 790 {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188}, 860 791 {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192}, 861 792 {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195}, 793 + {.compatible = "mediatek,mt8196-gce", .data = (void *)&gce_plat_mt8196}, 862 794 {} 863 795 }; 864 796 MODULE_DEVICE_TABLE(of, cmdq_of_ids);
+73 -4
drivers/soc/mediatek/mtk-cmdq-helper.c
··· 8 8 #include <linux/module.h> 9 9 #include <linux/mailbox_controller.h> 10 10 #include <linux/of.h> 11 + #include <linux/of_address.h> 11 12 #include <linux/soc/mediatek/mtk-cmdq.h> 12 13 13 14 #define CMDQ_WRITE_ENABLE_MASK BIT(0) ··· 61 60 struct cmdq_client_reg *client_reg, int idx) 62 61 { 63 62 struct of_phandle_args spec; 63 + struct resource res; 64 64 int err; 65 65 66 66 if (!client_reg) 67 67 return -ENOENT; 68 68 69 + err = of_address_to_resource(dev->of_node, 0, &res); 70 + if (err) { 71 + dev_err(dev, "Missing reg in %s node\n", dev->of_node->full_name); 72 + return -EINVAL; 73 + } 74 + client_reg->pa_base = res.start; 75 + 69 76 err = of_parse_phandle_with_fixed_args(dev->of_node, 70 77 "mediatek,gce-client-reg", 71 78 3, idx, &spec); 72 79 if (err < 0) { 73 - dev_warn(dev, 80 + dev_dbg(dev, 74 81 "error %d can't parse gce-client-reg property (%d)", 75 82 err, idx); 76 83 77 - return err; 84 + /* make subsys invalid */ 85 + client_reg->subsys = CMDQ_SUBSYS_INVALID; 86 + 87 + /* 88 + * All GCEs support writing register PA with mask without subsys, 89 + * but this requires extra GCE instructions to convert the PA into 90 + * a format that GCE can handle, which is less performance than 91 + * directly using subsys. Therefore, when subsys is available, 92 + * we prefer to use subsys for writing register PA. 
93 + */ 94 + client_reg->pkt_write = cmdq_pkt_write_pa; 95 + client_reg->pkt_write_mask = cmdq_pkt_write_mask_pa; 96 + 97 + return 0; 78 98 } 79 99 80 100 client_reg->subsys = (u8)spec.args[0]; 81 101 client_reg->offset = (u16)spec.args[1]; 82 102 client_reg->size = (u16)spec.args[2]; 83 103 of_node_put(spec.np); 104 + 105 + client_reg->pkt_write = cmdq_pkt_write_subsys; 106 + client_reg->pkt_write_mask = cmdq_pkt_write_mask_subsys; 84 107 85 108 return 0; 86 109 } ··· 165 140 } 166 141 167 142 pkt->pa_base = dma_addr; 143 + cmdq_get_mbox_priv(client->chan, &pkt->priv); 168 144 169 145 return 0; 170 146 } ··· 227 201 } 228 202 EXPORT_SYMBOL(cmdq_pkt_write); 229 203 204 + int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/, u32 pa_base, 205 + u16 offset, u32 value) 206 + { 207 + int err; 208 + 209 + err = cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(pa_base)); 210 + if (err < 0) 211 + return err; 212 + 213 + return cmdq_pkt_write_s_value(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_LOW(offset), value); 214 + } 215 + EXPORT_SYMBOL(cmdq_pkt_write_pa); 216 + 217 + int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base /*unused*/, 218 + u16 offset, u32 value) 219 + { 220 + return cmdq_pkt_write(pkt, subsys, offset, value); 221 + } 222 + EXPORT_SYMBOL(cmdq_pkt_write_subsys); 223 + 230 224 int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, 231 225 u16 offset, u32 value, u32 mask) 232 226 { ··· 263 217 return cmdq_pkt_write(pkt, subsys, offset_mask, value); 264 218 } 265 219 EXPORT_SYMBOL(cmdq_pkt_write_mask); 220 + 221 + int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/, u32 pa_base, 222 + u16 offset, u32 value, u32 mask) 223 + { 224 + int err; 225 + 226 + err = cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(pa_base)); 227 + if (err < 0) 228 + return err; 229 + 230 + return cmdq_pkt_write_s_mask_value(pkt, CMDQ_THR_SPR_IDX0, 231 + CMDQ_ADDR_LOW(offset), value, mask); 232 + } 233 + 
EXPORT_SYMBOL(cmdq_pkt_write_mask_pa); 234 + 235 + int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base /*unused*/, 236 + u16 offset, u32 value, u32 mask) 237 + { 238 + return cmdq_pkt_write_mask(pkt, subsys, offset, value, mask); 239 + } 240 + EXPORT_SYMBOL(cmdq_pkt_write_mask_subsys); 266 241 267 242 int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low, 268 243 u16 reg_idx) ··· 372 305 int ret; 373 306 374 307 /* read the value of src_addr into high_addr_reg_idx */ 308 + src_addr += pkt->priv.mminfra_offset; 375 309 ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(src_addr)); 376 310 if (ret < 0) 377 311 return ret; ··· 381 313 return ret; 382 314 383 315 /* write the value of value_reg_idx into dst_addr */ 316 + dst_addr += pkt->priv.mminfra_offset; 384 317 ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(dst_addr)); 385 318 if (ret < 0) 386 319 return ret; ··· 507 438 inst.op = CMDQ_CODE_MASK; 508 439 inst.dst_t = CMDQ_REG_TYPE; 509 440 inst.sop = CMDQ_POLL_ADDR_GPR; 510 - inst.value = addr; 441 + inst.value = addr + pkt->priv.mminfra_offset; 511 442 ret = cmdq_pkt_append_command(pkt, inst); 512 443 if (ret < 0) 513 444 return ret; ··· 567 498 struct cmdq_instruction inst = { 568 499 .op = CMDQ_CODE_JUMP, 569 500 .offset = CMDQ_JUMP_ABSOLUTE, 570 - .value = addr >> shift_pa 501 + .value = (addr + pkt->priv.mminfra_offset) >> pkt->priv.shift_pa 571 502 }; 572 503 return cmdq_pkt_append_command(pkt, inst); 573 504 }
+333 -33
drivers/soc/mediatek/mtk-dvfsrc.c
··· 7 7 8 8 #include <linux/arm-smccc.h> 9 9 #include <linux/bitfield.h> 10 + #include <linux/clk.h> 10 11 #include <linux/iopoll.h> 11 12 #include <linux/module.h> 12 13 #include <linux/of.h> ··· 16 15 #include <linux/soc/mediatek/dvfsrc.h> 17 16 #include <linux/soc/mediatek/mtk_sip_svc.h> 18 17 18 + /* DVFSRC_BASIC_CONTROL */ 19 + #define DVFSRC_V4_BASIC_CTRL_OPP_COUNT GENMASK(26, 20) 20 + 19 21 /* DVFSRC_LEVEL */ 20 22 #define DVFSRC_V1_LEVEL_TARGET_LEVEL GENMASK(15, 0) 21 23 #define DVFSRC_TGT_LEVEL_IDLE 0x00 22 24 #define DVFSRC_V1_LEVEL_CURRENT_LEVEL GENMASK(31, 16) 25 + 26 + #define DVFSRC_V4_LEVEL_TARGET_LEVEL GENMASK(15, 8) 27 + #define DVFSRC_V4_LEVEL_TARGET_PRESENT BIT(16) 23 28 24 29 /* DVFSRC_SW_REQ, DVFSRC_SW_REQ2 */ 25 30 #define DVFSRC_V1_SW_REQ2_DRAM_LEVEL GENMASK(1, 0) ··· 34 27 #define DVFSRC_V2_SW_REQ_DRAM_LEVEL GENMASK(3, 0) 35 28 #define DVFSRC_V2_SW_REQ_VCORE_LEVEL GENMASK(6, 4) 36 29 30 + #define DVFSRC_V4_SW_REQ_EMI_LEVEL GENMASK(3, 0) 31 + #define DVFSRC_V4_SW_REQ_DRAM_LEVEL GENMASK(15, 12) 32 + 37 33 /* DVFSRC_VCORE */ 38 34 #define DVFSRC_V2_VCORE_REQ_VSCP_LEVEL GENMASK(14, 12) 35 + 36 + /* DVFSRC_TARGET_GEAR */ 37 + #define DVFSRC_V4_GEAR_TARGET_DRAM GENMASK(7, 0) 38 + #define DVFSRC_V4_GEAR_TARGET_VCORE GENMASK(15, 8) 39 + 40 + /* DVFSRC_GEAR_INFO */ 41 + #define DVFSRC_V4_GEAR_INFO_REG_WIDTH 0x4 42 + #define DVFSRC_V4_GEAR_INFO_REG_LEVELS 64 43 + #define DVFSRC_V4_GEAR_INFO_VCORE GENMASK(3, 0) 44 + #define DVFSRC_V4_GEAR_INFO_EMI GENMASK(7, 4) 45 + #define DVFSRC_V4_GEAR_INFO_DRAM GENMASK(15, 12) 39 46 40 47 #define DVFSRC_POLL_TIMEOUT_US 1000 41 48 #define STARTUP_TIME_US 1 ··· 57 36 #define MTK_SIP_DVFSRC_INIT 0x0 58 37 #define MTK_SIP_DVFSRC_START 0x1 59 38 60 - struct dvfsrc_bw_constraints { 61 - u16 max_dram_nom_bw; 62 - u16 max_dram_peak_bw; 63 - u16 max_dram_hrt_bw; 39 + enum mtk_dvfsrc_bw_type { 40 + DVFSRC_BW_AVG, 41 + DVFSRC_BW_PEAK, 42 + DVFSRC_BW_HRT, 43 + DVFSRC_BW_MAX, 64 44 }; 65 45 66 46 struct dvfsrc_opp { 67 47 u32 
vcore_opp; 68 48 u32 dram_opp; 49 + u32 emi_opp; 69 50 }; 70 51 71 52 struct dvfsrc_opp_desc { ··· 78 55 struct dvfsrc_soc_data; 79 56 struct mtk_dvfsrc { 80 57 struct device *dev; 58 + struct clk *clk; 81 59 struct platform_device *icc; 82 60 struct platform_device *regulator; 83 61 const struct dvfsrc_soc_data *dvd; ··· 89 65 90 66 struct dvfsrc_soc_data { 91 67 const int *regs; 68 + const u8 *bw_units; 69 + const bool has_emi_ddr; 92 70 const struct dvfsrc_opp_desc *opps_desc; 71 + u32 (*calc_dram_bw)(struct mtk_dvfsrc *dvfsrc, enum mtk_dvfsrc_bw_type type, u64 bw); 93 72 u32 (*get_target_level)(struct mtk_dvfsrc *dvfsrc); 94 73 u32 (*get_current_level)(struct mtk_dvfsrc *dvfsrc); 95 74 u32 (*get_vcore_level)(struct mtk_dvfsrc *dvfsrc); 96 75 u32 (*get_vscp_level)(struct mtk_dvfsrc *dvfsrc); 76 + u32 (*get_opp_count)(struct mtk_dvfsrc *dvfsrc); 77 + int (*get_hw_opps)(struct mtk_dvfsrc *dvfsrc); 97 78 void (*set_dram_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw); 98 79 void (*set_dram_peak_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw); 99 80 void (*set_dram_hrt_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw); ··· 107 78 void (*set_vscp_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 108 79 int (*wait_for_opp_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 109 80 int (*wait_for_vcore_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 110 - const struct dvfsrc_bw_constraints *bw_constraints; 81 + 82 + /** 83 + * @bw_max_constraints - array of maximum bandwidth for this hardware 84 + * 85 + * indexed by &enum mtk_dvfsrc_bw_type, storing the maximum permissible 86 + * hardware value for each bandwidth type. 87 + */ 88 + const u32 *const bw_max_constraints; 89 + 90 + /** 91 + * @bw_min_constraints - array of minimum bandwidth for this hardware 92 + * 93 + * indexed by &enum mtk_dvfsrc_bw_type, storing the minimum permissible 94 + * hardware value for each bandwidth type. 
95 + */ 96 + const u32 *const bw_min_constraints; 111 97 }; 112 98 113 99 static u32 dvfsrc_readl(struct mtk_dvfsrc *dvfs, u32 offset) ··· 136 92 } 137 93 138 94 enum dvfsrc_regs { 95 + DVFSRC_BASIC_CONTROL, 139 96 DVFSRC_SW_REQ, 140 97 DVFSRC_SW_REQ2, 141 98 DVFSRC_LEVEL, ··· 144 99 DVFSRC_SW_BW, 145 100 DVFSRC_SW_PEAK_BW, 146 101 DVFSRC_SW_HRT_BW, 102 + DVFSRC_SW_EMI_BW, 147 103 DVFSRC_VCORE, 104 + DVFSRC_TARGET_GEAR, 105 + DVFSRC_GEAR_INFO_L, 106 + DVFSRC_GEAR_INFO_H, 148 107 DVFSRC_REGS_MAX, 149 108 }; 150 109 ··· 169 120 [DVFSRC_TARGET_LEVEL] = 0xd48, 170 121 }; 171 122 123 + static const int dvfsrc_mt8196_regs[] = { 124 + [DVFSRC_BASIC_CONTROL] = 0x0, 125 + [DVFSRC_SW_REQ] = 0x18, 126 + [DVFSRC_VCORE] = 0x80, 127 + [DVFSRC_GEAR_INFO_L] = 0xfc, 128 + [DVFSRC_SW_BW] = 0x1e8, 129 + [DVFSRC_SW_PEAK_BW] = 0x1f4, 130 + [DVFSRC_SW_HRT_BW] = 0x20c, 131 + [DVFSRC_LEVEL] = 0x5f0, 132 + [DVFSRC_TARGET_LEVEL] = 0x5f0, 133 + [DVFSRC_SW_REQ2] = 0x604, 134 + [DVFSRC_SW_EMI_BW] = 0x60c, 135 + [DVFSRC_TARGET_GEAR] = 0x6ac, 136 + [DVFSRC_GEAR_INFO_H] = 0x6b0, 137 + }; 138 + 172 139 static const struct dvfsrc_opp *dvfsrc_get_current_opp(struct mtk_dvfsrc *dvfsrc) 173 140 { 174 141 u32 level = dvfsrc->dvd->get_current_level(dvfsrc); 175 142 176 143 return &dvfsrc->curr_opps->opps[level]; 144 + } 145 + 146 + static u32 dvfsrc_get_current_target_vcore_gear(struct mtk_dvfsrc *dvfsrc) 147 + { 148 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_TARGET_GEAR); 149 + 150 + return FIELD_GET(DVFSRC_V4_GEAR_TARGET_VCORE, val); 151 + } 152 + 153 + static u32 dvfsrc_get_current_target_dram_gear(struct mtk_dvfsrc *dvfsrc) 154 + { 155 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_TARGET_GEAR); 156 + 157 + return FIELD_GET(DVFSRC_V4_GEAR_TARGET_DRAM, val); 177 158 } 178 159 179 160 static bool dvfsrc_is_idle(struct mtk_dvfsrc *dvfsrc) ··· 262 183 return 0; 263 184 } 264 185 186 + static int dvfsrc_wait_for_vcore_level_v4(struct mtk_dvfsrc *dvfsrc, u32 level) 187 + { 188 + u32 val; 189 + 190 + return 
readx_poll_timeout_atomic(dvfsrc_get_current_target_vcore_gear, 191 + dvfsrc, val, val >= level, 192 + STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US); 193 + } 194 + 195 + static int dvfsrc_wait_for_opp_level_v4(struct mtk_dvfsrc *dvfsrc, u32 level) 196 + { 197 + u32 val; 198 + 199 + return readx_poll_timeout_atomic(dvfsrc_get_current_target_dram_gear, 200 + dvfsrc, val, val >= level, 201 + STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US); 202 + } 203 + 265 204 static u32 dvfsrc_get_target_level_v1(struct mtk_dvfsrc *dvfsrc) 266 205 { 267 206 u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL); ··· 304 207 { 305 208 u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL); 306 209 u32 level = ffs(val); 210 + 211 + /* Valid levels */ 212 + if (level < dvfsrc->curr_opps->num_opp) 213 + return dvfsrc->curr_opps->num_opp - level; 214 + 215 + /* Zero for level 0 or invalid level */ 216 + return 0; 217 + } 218 + 219 + static u32 dvfsrc_get_target_level_v4(struct mtk_dvfsrc *dvfsrc) 220 + { 221 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_TARGET_LEVEL); 222 + 223 + if (val & DVFSRC_V4_LEVEL_TARGET_PRESENT) 224 + return FIELD_GET(DVFSRC_V4_LEVEL_TARGET_LEVEL, val) + 1; 225 + return 0; 226 + } 227 + 228 + static u32 dvfsrc_get_current_level_v4(struct mtk_dvfsrc *dvfsrc) 229 + { 230 + u32 level = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL) + 1; 307 231 308 232 /* Valid levels */ 309 233 if (level < dvfsrc->curr_opps->num_opp) ··· 385 267 dvfsrc_writel(dvfsrc, DVFSRC_VCORE, val); 386 268 } 387 269 388 - static void __dvfsrc_set_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, u32 reg, 389 - u16 max_bw, u16 min_bw, u64 bw) 270 + static u32 dvfsrc_get_opp_count_v4(struct mtk_dvfsrc *dvfsrc) 390 271 { 391 - u32 new_bw = (u32)div_u64(bw, 100 * 1000); 272 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_BASIC_CONTROL); 392 273 393 - /* If bw constraints (in mbps) are defined make sure to respect them */ 394 - if (max_bw) 395 - new_bw = min(new_bw, max_bw); 396 - if (min_bw && new_bw > 0) 397 - new_bw = max(new_bw, min_bw); 274 + return 
FIELD_GET(DVFSRC_V4_BASIC_CTRL_OPP_COUNT, val) + 1; 275 + } 398 276 399 - dvfsrc_writel(dvfsrc, reg, new_bw); 277 + static u32 278 + dvfsrc_calc_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, enum mtk_dvfsrc_bw_type type, u64 bw) 279 + { 280 + return clamp_val(div_u64(bw, 100 * 1000), dvfsrc->dvd->bw_min_constraints[type], 281 + dvfsrc->dvd->bw_max_constraints[type]); 282 + } 283 + 284 + /** 285 + * dvfsrc_calc_dram_bw_v4 - convert kbps to hardware register bandwidth value 286 + * @dvfsrc: pointer to the &struct mtk_dvfsrc of this driver instance 287 + * @type: one of %DVFSRC_BW_AVG, %DVFSRC_BW_PEAK, or %DVFSRC_BW_HRT 288 + * @bw: the bandwidth in kilobits per second 289 + * 290 + * Returns the hardware register value appropriate for expressing @bw, clamped 291 + * to hardware limits. 292 + */ 293 + static u32 294 + dvfsrc_calc_dram_bw_v4(struct mtk_dvfsrc *dvfsrc, enum mtk_dvfsrc_bw_type type, u64 bw) 295 + { 296 + u8 bw_unit = dvfsrc->dvd->bw_units[type]; 297 + u64 bw_mbps; 298 + u32 bw_hw; 299 + 300 + if (type < DVFSRC_BW_AVG || type >= DVFSRC_BW_MAX) 301 + return 0; 302 + 303 + bw_mbps = div_u64(bw, 1000); 304 + bw_hw = div_u64((bw_mbps + bw_unit - 1), bw_unit); 305 + return clamp_val(bw_hw, dvfsrc->dvd->bw_min_constraints[type], 306 + dvfsrc->dvd->bw_max_constraints[type]); 307 + } 308 + 309 + static void __dvfsrc_set_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, u32 reg, 310 + enum mtk_dvfsrc_bw_type type, u64 bw) 311 + { 312 + u32 bw_hw = dvfsrc->dvd->calc_dram_bw(dvfsrc, type, bw); 313 + 314 + dvfsrc_writel(dvfsrc, reg, bw_hw); 315 + 316 + if (type == DVFSRC_BW_AVG && dvfsrc->dvd->has_emi_ddr) 317 + dvfsrc_writel(dvfsrc, DVFSRC_SW_EMI_BW, bw_hw); 400 318 } 401 319 402 320 static void dvfsrc_set_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw) 403 321 { 404 - u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_nom_bw; 405 - 406 - __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_BW, max_bw, 0, bw); 322 + __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_BW, DVFSRC_BW_AVG, bw); 407 323 }; 408 
324 409 325 static void dvfsrc_set_dram_peak_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw) 410 326 { 411 - u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_peak_bw; 412 - 413 - __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_PEAK_BW, max_bw, 0, bw); 327 + __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_PEAK_BW, DVFSRC_BW_PEAK, bw); 414 328 } 415 329 416 330 static void dvfsrc_set_dram_hrt_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw) 417 331 { 418 - u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_hrt_bw; 419 - 420 - __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_HRT_BW, max_bw, 0, bw); 332 + __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_HRT_BW, DVFSRC_BW_HRT, bw); 421 333 } 422 334 423 335 static void dvfsrc_set_opp_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level) ··· 460 312 val |= FIELD_PREP(DVFSRC_V1_SW_REQ2_VCORE_LEVEL, opp->vcore_opp); 461 313 462 314 dev_dbg(dvfsrc->dev, "vcore_opp: %d, dram_opp: %d\n", opp->vcore_opp, opp->dram_opp); 315 + dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ, val); 316 + } 317 + 318 + static u32 dvfsrc_get_opp_gear(struct mtk_dvfsrc *dvfsrc, u8 level) 319 + { 320 + u32 reg_ofst, val; 321 + u8 idx; 322 + 323 + /* Calculate register offset and index for requested gear */ 324 + if (level < DVFSRC_V4_GEAR_INFO_REG_LEVELS) { 325 + reg_ofst = dvfsrc->dvd->regs[DVFSRC_GEAR_INFO_L]; 326 + idx = level; 327 + } else { 328 + reg_ofst = dvfsrc->dvd->regs[DVFSRC_GEAR_INFO_H]; 329 + idx = level - DVFSRC_V4_GEAR_INFO_REG_LEVELS; 330 + } 331 + reg_ofst += DVFSRC_V4_GEAR_INFO_REG_WIDTH * (level / 2); 332 + 333 + /* Read the corresponding gear register */ 334 + val = readl(dvfsrc->regs + reg_ofst); 335 + 336 + /* Each register contains two sets of data, 16 bits per gear */ 337 + val >>= 16 * (idx % 2); 338 + 339 + return val; 340 + } 341 + 342 + static int dvfsrc_get_hw_opps_v4(struct mtk_dvfsrc *dvfsrc) 343 + { 344 + struct dvfsrc_opp *dvfsrc_opps; 345 + struct dvfsrc_opp_desc *desc; 346 + u32 num_opps, gear_info; 347 + u8 num_vcore, num_dram; 348 + u8 num_emi; 349 + int i; 350 + 351 + 
num_opps = dvfsrc_get_opp_count_v4(dvfsrc); 352 + if (num_opps == 0) { 353 + dev_err(dvfsrc->dev, "No OPPs programmed in DVFSRC MCU.\n"); 354 + return -EINVAL; 355 + } 356 + 357 + /* 358 + * The first 16 bits set in the gear info table says how many OPPs 359 + * and how many vcore, dram and emi table entries are available. 360 + */ 361 + gear_info = dvfsrc_readl(dvfsrc, DVFSRC_GEAR_INFO_L); 362 + if (gear_info == 0) { 363 + dev_err(dvfsrc->dev, "No gear info in DVFSRC MCU.\n"); 364 + return -EINVAL; 365 + } 366 + 367 + num_vcore = FIELD_GET(DVFSRC_V4_GEAR_INFO_VCORE, gear_info) + 1; 368 + num_dram = FIELD_GET(DVFSRC_V4_GEAR_INFO_DRAM, gear_info) + 1; 369 + num_emi = FIELD_GET(DVFSRC_V4_GEAR_INFO_EMI, gear_info) + 1; 370 + dev_info(dvfsrc->dev, 371 + "Discovered %u gears and %u vcore, %u dram, %u emi table entries.\n", 372 + num_opps, num_vcore, num_dram, num_emi); 373 + 374 + /* Allocate everything now as anything else after that cannot fail */ 375 + desc = devm_kzalloc(dvfsrc->dev, sizeof(*desc), GFP_KERNEL); 376 + if (!desc) 377 + return -ENOMEM; 378 + 379 + dvfsrc_opps = devm_kcalloc(dvfsrc->dev, num_opps + 1, 380 + sizeof(*dvfsrc_opps), GFP_KERNEL); 381 + if (!dvfsrc_opps) 382 + return -ENOMEM; 383 + 384 + /* Read the OPP table gear indices */ 385 + for (i = 0; i <= num_opps; i++) { 386 + gear_info = dvfsrc_get_opp_gear(dvfsrc, num_opps - i); 387 + dvfsrc_opps[i].vcore_opp = FIELD_GET(DVFSRC_V4_GEAR_INFO_VCORE, gear_info); 388 + dvfsrc_opps[i].dram_opp = FIELD_GET(DVFSRC_V4_GEAR_INFO_DRAM, gear_info); 389 + dvfsrc_opps[i].emi_opp = FIELD_GET(DVFSRC_V4_GEAR_INFO_EMI, gear_info); 390 + }; 391 + desc->num_opp = num_opps + 1; 392 + desc->opps = dvfsrc_opps; 393 + 394 + /* Assign to main structure now that everything is done! 
*/ 395 + dvfsrc->curr_opps = desc; 396 + 397 + return 0; 398 + } 399 + 400 + static void dvfsrc_set_dram_level_v4(struct mtk_dvfsrc *dvfsrc, u32 level) 401 + { 402 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ); 403 + 404 + val &= ~DVFSRC_V4_SW_REQ_DRAM_LEVEL; 405 + val |= FIELD_PREP(DVFSRC_V4_SW_REQ_DRAM_LEVEL, level); 406 + 407 + dev_dbg(dvfsrc->dev, "%s level=%u\n", __func__, level); 408 + 463 409 dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ, val); 464 410 } 465 411 ··· 664 422 if (IS_ERR(dvfsrc->regs)) 665 423 return PTR_ERR(dvfsrc->regs); 666 424 425 + dvfsrc->clk = devm_clk_get_enabled(&pdev->dev, NULL); 426 + if (IS_ERR(dvfsrc->clk)) 427 + return dev_err_probe(&pdev->dev, PTR_ERR(dvfsrc->clk), 428 + "Couldn't get and enable DVFSRC clock\n"); 429 + 667 430 arm_smccc_smc(MTK_SIP_DVFSRC_VCOREFS_CONTROL, MTK_SIP_DVFSRC_INIT, 668 431 0, 0, 0, 0, 0, 0, &ares); 669 432 if (ares.a0) ··· 677 430 dvfsrc->dram_type = ares.a1; 678 431 dev_dbg(&pdev->dev, "DRAM Type: %d\n", dvfsrc->dram_type); 679 432 680 - dvfsrc->curr_opps = &dvfsrc->dvd->opps_desc[dvfsrc->dram_type]; 433 + /* Newer versions of the DVFSRC MCU have pre-programmed gear tables */ 434 + if (dvfsrc->dvd->get_hw_opps) { 435 + ret = dvfsrc->dvd->get_hw_opps(dvfsrc); 436 + if (ret) 437 + return ret; 438 + } else { 439 + dvfsrc->curr_opps = &dvfsrc->dvd->opps_desc[dvfsrc->dram_type]; 440 + } 681 441 platform_set_drvdata(pdev, dvfsrc); 682 442 683 443 ret = devm_of_platform_populate(&pdev->dev); ··· 694 440 /* Everything is set up - make it run! 
*/ 695 441 arm_smccc_smc(MTK_SIP_DVFSRC_VCOREFS_CONTROL, MTK_SIP_DVFSRC_START, 696 442 0, 0, 0, 0, 0, 0, &ares); 697 - if (ares.a0) 443 + if (ares.a0 & BIT(0)) 698 444 return dev_err_probe(&pdev->dev, -EINVAL, "Cannot start DVFSRC: %lu\n", ares.a0); 699 445 700 446 return 0; 701 447 } 702 448 703 - static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v1 = { 0, 0, 0 }; 704 - static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v2 = { 705 - .max_dram_nom_bw = 255, 706 - .max_dram_peak_bw = 255, 707 - .max_dram_hrt_bw = 1023, 449 + static const u32 dvfsrc_bw_min_constr_none[DVFSRC_BW_MAX] = { 450 + [DVFSRC_BW_AVG] = 0, 451 + [DVFSRC_BW_PEAK] = 0, 452 + [DVFSRC_BW_HRT] = 0, 453 + }; 454 + 455 + static const u32 dvfsrc_bw_max_constr_v1[DVFSRC_BW_MAX] = { 456 + [DVFSRC_BW_AVG] = U32_MAX, 457 + [DVFSRC_BW_PEAK] = U32_MAX, 458 + [DVFSRC_BW_HRT] = U32_MAX, 459 + }; 460 + 461 + static const u32 dvfsrc_bw_max_constr_v2[DVFSRC_BW_MAX] = { 462 + [DVFSRC_BW_AVG] = 65535, 463 + [DVFSRC_BW_PEAK] = 65535, 464 + [DVFSRC_BW_HRT] = 1023, 708 465 }; 709 466 710 467 static const struct dvfsrc_opp dvfsrc_opp_mt6893_lp4[] = { ··· 748 483 .set_vscp_level = dvfsrc_set_vscp_level_v2, 749 484 .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2, 750 485 .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, 751 - .bw_constraints = &dvfsrc_bw_constr_v2, 486 + .bw_max_constraints = dvfsrc_bw_max_constr_v2, 487 + .bw_min_constraints = dvfsrc_bw_min_constr_none, 752 488 }; 753 489 754 490 static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp4[] = { ··· 778 512 static const struct dvfsrc_soc_data mt8183_data = { 779 513 .opps_desc = dvfsrc_opp_mt8183_desc, 780 514 .regs = dvfsrc_mt8183_regs, 515 + .calc_dram_bw = dvfsrc_calc_dram_bw_v1, 781 516 .get_target_level = dvfsrc_get_target_level_v1, 782 517 .get_current_level = dvfsrc_get_current_level_v1, 783 518 .get_vcore_level = dvfsrc_get_vcore_level_v1, ··· 787 520 .set_vcore_level = dvfsrc_set_vcore_level_v1, 788 521 .wait_for_opp_level = 
dvfsrc_wait_for_opp_level_v1, 789 522 .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, 790 - .bw_constraints = &dvfsrc_bw_constr_v1, 523 + .bw_max_constraints = dvfsrc_bw_max_constr_v1, 524 + .bw_min_constraints = dvfsrc_bw_min_constr_none, 791 525 }; 792 526 793 527 static const struct dvfsrc_opp dvfsrc_opp_mt8195_lp4[] = { ··· 810 542 static const struct dvfsrc_soc_data mt8195_data = { 811 543 .opps_desc = dvfsrc_opp_mt8195_desc, 812 544 .regs = dvfsrc_mt8195_regs, 545 + .calc_dram_bw = dvfsrc_calc_dram_bw_v1, 813 546 .get_target_level = dvfsrc_get_target_level_v2, 814 547 .get_current_level = dvfsrc_get_current_level_v2, 815 548 .get_vcore_level = dvfsrc_get_vcore_level_v2, ··· 822 553 .set_vscp_level = dvfsrc_set_vscp_level_v2, 823 554 .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2, 824 555 .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, 825 - .bw_constraints = &dvfsrc_bw_constr_v2, 556 + .bw_max_constraints = dvfsrc_bw_max_constr_v2, 557 + .bw_min_constraints = dvfsrc_bw_min_constr_none, 558 + }; 559 + 560 + static const u8 mt8196_bw_units[] = { 561 + [DVFSRC_BW_AVG] = 64, 562 + [DVFSRC_BW_PEAK] = 64, 563 + [DVFSRC_BW_HRT] = 30, 564 + }; 565 + 566 + static const struct dvfsrc_soc_data mt8196_data = { 567 + .regs = dvfsrc_mt8196_regs, 568 + .bw_units = mt8196_bw_units, 569 + .has_emi_ddr = true, 570 + .get_target_level = dvfsrc_get_target_level_v4, 571 + .get_current_level = dvfsrc_get_current_level_v4, 572 + .get_vcore_level = dvfsrc_get_vcore_level_v2, 573 + .get_vscp_level = dvfsrc_get_vscp_level_v2, 574 + .get_opp_count = dvfsrc_get_opp_count_v4, 575 + .get_hw_opps = dvfsrc_get_hw_opps_v4, 576 + .calc_dram_bw = dvfsrc_calc_dram_bw_v4, 577 + .set_dram_bw = dvfsrc_set_dram_bw_v1, 578 + .set_dram_peak_bw = dvfsrc_set_dram_peak_bw_v1, 579 + .set_dram_hrt_bw = dvfsrc_set_dram_hrt_bw_v1, 580 + .set_opp_level = dvfsrc_set_dram_level_v4, 581 + .set_vcore_level = dvfsrc_set_vcore_level_v2, 582 + .set_vscp_level = dvfsrc_set_vscp_level_v2, 583 + 
.wait_for_opp_level = dvfsrc_wait_for_opp_level_v4, 584 + .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v4, 585 + .bw_max_constraints = dvfsrc_bw_max_constr_v2, 586 + .bw_min_constraints = dvfsrc_bw_min_constr_none, 826 587 }; 827 588 828 589 static const struct of_device_id mtk_dvfsrc_of_match[] = { 829 590 { .compatible = "mediatek,mt6893-dvfsrc", .data = &mt6893_data }, 830 591 { .compatible = "mediatek,mt8183-dvfsrc", .data = &mt8183_data }, 831 592 { .compatible = "mediatek,mt8195-dvfsrc", .data = &mt8195_data }, 593 + { .compatible = "mediatek,mt8196-dvfsrc", .data = &mt8196_data }, 832 594 { /* sentinel */ } 833 595 }; 834 596
+1
drivers/soc/mediatek/mtk-socinfo.c
··· 59 59 MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED), 60 60 MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED), 61 61 MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081), 62 + MTK_SOCINFO_ENTRY("MT8371", "MT8371AV/AZA", "Genio 520", 0x83710000, 0x00000081), 62 63 MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080), 63 64 MTK_SOCINFO_ENTRY("MT8391", "MT8391AV/AZA", "Genio 720", 0x83910000, 0x00000080), 64 65 MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
+2 -3
drivers/soc/mediatek/mtk-svs.c
··· 9 9 #include <linux/bits.h> 10 10 #include <linux/clk.h> 11 11 #include <linux/completion.h> 12 + #include <linux/cleanup.h> 12 13 #include <linux/cpu.h> 13 14 #include <linux/cpuidle.h> 14 15 #include <linux/debugfs.h> ··· 790 789 struct svs_bank *svsb = file_inode(filp)->i_private; 791 790 struct svs_platform *svsp = dev_get_drvdata(svsb->dev); 792 791 int enabled, ret; 793 - char *buf = NULL; 792 + char *buf __free(kfree) = NULL; 794 793 795 794 if (count >= PAGE_SIZE) 796 795 return -EINVAL; ··· 807 806 svs_bank_disable_and_restore_default_volts(svsp, svsb); 808 807 svsb->mode_support = SVSB_MODE_ALL_DISABLE; 809 808 } 810 - 811 - kfree(buf); 812 809 813 810 return count; 814 811 }
+19
include/linux/mailbox/mtk-cmdq-mailbox.h
··· 70 70 struct cmdq_pkt *pkt; 71 71 }; 72 72 73 + struct cmdq_mbox_priv { 74 + u8 shift_pa; 75 + dma_addr_t mminfra_offset; 76 + }; 77 + 73 78 struct cmdq_pkt { 74 79 void *va_base; 75 80 dma_addr_t pa_base; 76 81 size_t cmd_buf_size; /* command occupied size */ 77 82 size_t buf_size; /* real buffer size */ 83 + struct cmdq_mbox_priv priv; /* for generating instruction */ 78 84 }; 85 + 86 + /** 87 + * cmdq_get_mbox_priv() - get the private data of mailbox channel 88 + * @chan: mailbox channel 89 + * @priv: pointer to store the private data of mailbox channel 90 + * 91 + * While generating the GCE instruction to command buffer, the private data 92 + * of GCE hardware may need to be referenced, such as the shift bits of 93 + * physical address. 94 + * 95 + * This function should be called before generating the GCE instruction. 96 + */ 97 + void cmdq_get_mbox_priv(struct mbox_chan *chan, struct cmdq_mbox_priv *priv); 79 98 80 99 /** 81 100 * cmdq_get_shift_pa() - get the shift bits of physical address
+93
include/linux/soc/mediatek/mtk-cmdq.h
··· 23 23 #define CMDQ_THR_SPR_IDX2 (2) 24 24 #define CMDQ_THR_SPR_IDX3 (3) 25 25 26 + #define CMDQ_SUBSYS_INVALID (U8_MAX) 27 + 26 28 struct cmdq_pkt; 27 29 28 30 enum cmdq_logic_op { ··· 54 52 55 53 struct cmdq_client_reg { 56 54 u8 subsys; 55 + phys_addr_t pa_base; 57 56 u16 offset; 58 57 u16 size; 58 + 59 + /* 60 + * Client only uses these functions for MMIO access, 61 + * so doesn't need to handle the mminfra_offset. 62 + * The mminfra_offset is used for DRAM access and 63 + * is handled internally by CMDQ APIs. 64 + */ 65 + int (*pkt_write)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base, 66 + u16 offset, u32 value); 67 + int (*pkt_write_mask)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base, 68 + u16 offset, u32 value, u32 mask); 59 69 }; 60 70 61 71 struct cmdq_client { ··· 136 122 int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value); 137 123 138 124 /** 125 + * cmdq_pkt_write_pa() - append write command to the CMDQ packet with pa_base 126 + * @pkt: the CMDQ packet 127 + * @subsys: unused parameter 128 + * @pa_base: the physical address base of the hardware register 129 + * @offset: register offset from CMDQ sub system 130 + * @value: the specified target register value 131 + * 132 + * Return: 0 for success; else the error code is returned 133 + */ 134 + int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/, 135 + u32 pa_base, u16 offset, u32 value); 136 + 137 + /** 138 + * cmdq_pkt_write_subsys() - append write command to the CMDQ packet with subsys 139 + * @pkt: the CMDQ packet 140 + * @subsys: the CMDQ sub system code 141 + * @pa_base: unused parameter 142 + * @offset: register offset from CMDQ sub system 143 + * @value: the specified target register value 144 + * 145 + * Return: 0 for success; else the error code is returned 146 + */ 147 + int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys, 148 + u32 pa_base /*unused*/, u16 offset, u32 value); 149 + 150 + /** 139 151 * cmdq_pkt_write_mask() - append write command with 
mask to the CMDQ packet 140 152 * @pkt: the CMDQ packet 141 153 * @subsys: the CMDQ sub system code ··· 173 133 */ 174 134 int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, 175 135 u16 offset, u32 value, u32 mask); 136 + 137 + /** 138 + * cmdq_pkt_write_mask_pa() - append write command with mask to the CMDQ packet with pa 139 + * @pkt: the CMDQ packet 140 + * @subsys: unused parameter 141 + * @pa_base: the physical address base of the hardware register 142 + * @offset: register offset from CMDQ sub system 143 + * @value: the specified target register value 144 + * @mask: the specified target register mask 145 + * 146 + * Return: 0 for success; else the error code is returned 147 + */ 148 + int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/, 149 + u32 pa_base, u16 offset, u32 value, u32 mask); 150 + 151 + /** 152 + * cmdq_pkt_write_mask_subsys() - append write command with mask to the CMDQ packet with subsys 153 + * @pkt: the CMDQ packet 154 + * @subsys: the CMDQ sub system code 155 + * @pa_base: unused parameter 156 + * @offset: register offset from CMDQ sub system 157 + * @value: the specified target register value 158 + * @mask: the specified target register mask 159 + * 160 + * Return: 0 for success; else the error code is returned 161 + */ 162 + int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys, 163 + u32 pa_base /*unused*/, u16 offset, u32 value, u32 mask); 176 164 177 165 /* 178 166 * cmdq_pkt_read_s() - append read_s command to the CMDQ packet ··· 486 418 return -ENOENT; 487 419 } 488 420 421 + static inline int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/, 422 + u32 pa_base, u16 offset, u32 value) 423 + { 424 + return -ENOENT; 425 + } 426 + 427 + static inline int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys, 428 + u32 pa_base /*unused*/, u16 offset, u32 value) 429 + { 430 + return -ENOENT; 431 + } 432 + 489 433 static inline int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, 490 434 u16 
offset, u32 value, u32 mask) 435 + { 436 + return -ENOENT; 437 + } 438 + 439 + static inline int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/, 440 + u32 pa_base, u16 offset, u32 value, u32 mask) 441 + { 442 + return -ENOENT; 443 + } 444 + 445 + static inline int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys, 446 + u32 pa_base /*unused*/, u16 offset, 447 + u32 value, u32 mask) 491 448 { 492 449 return -ENOENT; 493 450 }