Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2020-2025 Intel Corporation
4 */
5
6#include <linux/units.h>
7
8#include "ivpu_drv.h"
9#include "ivpu_hw.h"
10#include "ivpu_hw_btrs.h"
11#include "ivpu_hw_btrs_lnl_reg.h"
12#include "ivpu_hw_btrs_mtl_reg.h"
13#include "ivpu_hw_reg_io.h"
14#include "ivpu_pm.h"
15
/* MTL buttress interrupt sources serviced by the irqB handler */
#define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR)))

/* LNL buttress interrupt sources serviced by the irqB handler */
#define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR)))

/* MTL handled sources plus FREQ_CHANGE; used to probe the clear-with-0 workaround */
#define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \
			       FREQ_CHANGE)))

/* All-ones value written to LOCAL_INT_MASK to mask every buttress interrupt */
#define BTRS_IRQ_DISABLE_MASK ((u32)-1)

#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)


/* PLL workpoint defaults; PLL frequency = ratio * 50 MHz reference clock */
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)

/* Poll timeouts for buttress register state transitions */
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)

/* Work point configuration values */
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
#define MTL_CONFIG_1_TILE 0x01
#define MTL_CONFIG_2_TILE 0x02
#define MTL_PLL_RATIO_5_3 0x01
#define MTL_PLL_RATIO_4_3 0x02
#define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0
#define BTRS_MTL_TILE_SKU_BOTH 0x3630

/* LNL supports up to 6 tiles, one bit per tile in the fuse mask */
#define BTRS_LNL_TILE_MAX_NUM 6
#define BTRS_LNL_TILE_MAX_MASK 0x3f

/* Default port arbitration weights (regular and ATS paths) */
#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u

/* PCODE mailbox DCT (duty cycle throttling) command and parameter values */
#define DCT_REQ 0x2
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0

/* Forward declaration: used by debug prints before the definition below */
static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);
63
/*
 * Detect whether this MTL unit needs the "clear interrupts by writing 0"
 * workaround: write all-ones to INTERRUPT_STAT and, if the bits remain set
 * (writing 1s did not clear them), clear with 0 and report true.
 * Returns true/false (as int) so the result can feed the WA flag directly.
 */
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
	if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) {
		/* Writing 1s does not clear the interrupt status register */
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
		return true;
	}

	return false;
}
75
/* Read min/PN/max PLL ratios from the MTL fuse registers into hw->pll. */
static void freq_ratios_init_mtl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 fmin_fuse, fmax_fuse;

	/* FMIN fuse carries both the minimum and the PN (nominal) ratio */
	fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE);
	hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE);
	hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}
88
/* Read min/PN/max PLL ratios from the LNL fuse registers into hw->pll. */
static void freq_ratios_init_lnl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 fmin_fuse, fmax_fuse;

	/* FMIN fuse carries both the minimum and the PN (nominal) ratio */
	fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE);
	hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE);
	hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}
101
/*
 * Populate hw->pll ratios from fuses, then constrain them: min/max are
 * clamped against the driver-wide ivpu_pll_min_ratio/ivpu_pll_max_ratio
 * limits, and the PN (nominal) ratio is kept within the resulting
 * [min, max] range.
 */
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		freq_ratios_init_mtl(vdev);
	else
		freq_ratios_init_lnl(vdev);

	hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
	hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
	hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
115
116static bool tile_disable_check(u32 config)
117{
118 /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
119 if (config == 0)
120 return true;
121
122 if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1))
123 return false;
124
125 if ((config & (config - 1)) == 0)
126 return true;
127
128 return false;
129}
130
/*
 * Read the LNL tile fuse and extract the tile-disable mask.
 *
 * Return: 0 on success with the mask stored in *tile_fuse_config, or
 * -EIO when the fuse VALID bit is not set. A mask with more than one bit
 * set is unexpected but not fatal - it is only warned about.
 */
static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
{
	u32 fuse;
	u32 config;

	fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE);
	if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) {
		ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
		return -EIO;
	}

	config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
	if (!tile_disable_check(config))
		ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);

	ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);

	*tile_fuse_config = config;
	return 0;
}
151
/* MTL has no tile fuses: use the fixed both-tiles SKU and a 2-tile, 4/3-ratio workpoint. */
static int info_init_mtl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
	hw->sku = BTRS_MTL_TILE_SKU_BOTH;
	hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);

	return 0;
}
162
/* LNL: read the tile-disable fuse and start at the default profiling frequency. */
static int info_init_lnl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 tile_fuse_config;
	int ret;

	ret = read_tile_config_fuse(vdev, &tile_fuse_config);
	if (ret)
		return ret;

	hw->tile_fuse = tile_fuse_config;
	hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;

	return 0;
}
178
179int ivpu_hw_btrs_info_init(struct ivpu_device *vdev)
180{
181 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
182 return info_init_mtl(vdev);
183 else
184 return info_init_lnl(vdev);
185}
186
187static int wp_request_sync(struct ivpu_device *vdev)
188{
189 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
190 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
191 else
192 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
193}
194
195static int wait_for_status_ready(struct ivpu_device *vdev, bool enable)
196{
197 u32 exp_val = enable ? 0x1 : 0x0;
198
199 if (IVPU_WA(punit_disabled))
200 return 0;
201
202 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
203 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
204 else
205 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
206}
207
/* Payload of a PUNIT workpoint (PLL/power) request */
struct wp_request {
	u16 min;	/* minimum allowed PLL ratio */
	u16 max;	/* maximum allowed PLL ratio */
	u16 target;	/* requested PLL ratio */
	u16 cfg;	/* workpoint config; 0 on LNL (see prepare_wp_request()) */
	u16 epp;	/* energy/performance preference; 0 on MTL */
	u16 cdyn;	/* dynamic capacitance value; 0 on MTL */
};
216
/*
 * Program an MTL workpoint request into the WP_REQ payload registers and
 * submit it by setting the SEND bit (polled to zero by wp_request_sync()).
 * NOTE(review): EPP is always programmed with PLL_EPP_DEFAULT on MTL;
 * wp->epp is not used here (prepare_wp_request() leaves it 0 for MTL).
 */
static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp)
{
	u32 val;

	/* Payload 0: allowed PLL ratio range */
	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val);

	/* Payload 1: requested ratio and energy/performance preference */
	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val);

	/* Payload 2: workpoint configuration (tile count / ratio select) */
	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val);

	/* Kick the request */
	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val);
}
239
/*
 * Program an LNL workpoint request into the WP_REQ payload registers and
 * submit it by setting the SEND bit (polled to zero by wp_request_sync()).
 */
static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp)
{
	u32 val;

	/* Payload 0: allowed PLL ratio range */
	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val);

	/* Payload 1: requested ratio and energy/performance preference */
	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val);

	/* Payload 2: workpoint config and dynamic capacitance */
	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val);

	/* Kick the request */
	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val);
}
263
264static void wp_request(struct ivpu_device *vdev, struct wp_request *wp)
265{
266 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
267 wp_request_mtl(vdev, wp);
268 else
269 wp_request_lnl(vdev, wp);
270}
271
/*
 * Submit a workpoint request to PUNIT: wait for any previous request to
 * be consumed, program and send this one, then wait for it to be consumed
 * as well. Returns 0 on success or the poll-timeout errno.
 */
static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp)
{
	int ret;

	ret = wp_request_sync(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret);
		return ret;
	}

	wp_request(vdev, wp);

	ret = wp_request_sync(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret);

	return ret;
}
290
291static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
292{
293 struct ivpu_hw_info *hw = vdev->hw;
294
295 wp->min = hw->pll.min_ratio;
296 wp->max = hw->pll.max_ratio;
297
298 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
299 wp->target = enable ? hw->pll.pn_ratio : 0;
300 wp->cfg = enable ? hw->config : 0;
301 wp->cdyn = 0;
302 wp->epp = 0;
303 } else {
304 wp->target = hw->pll.pn_ratio;
305 wp->cfg = 0;
306 wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
307 wp->epp = enable ? PLL_EPP_DEFAULT : 0;
308 }
309}
310
311static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
312{
313 u32 exp_val = enable ? 0x1 : 0x0;
314
315 if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL)
316 return 0;
317
318 if (IVPU_WA(punit_disabled))
319 return 0;
320
321 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
322}
323
324static int wait_for_cdyn_deassert(struct ivpu_device *vdev)
325{
326 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
327 return 0;
328
329 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_CDYN, CDYN, 0, PLL_TIMEOUT_US);
330}
331
/*
 * Drive the NPU workpoint up (enable) or down (disable): send the
 * PLL/workpoint request to PUNIT, then wait for PLL lock (MTL only), for
 * the NPU READY state and, on disable, for CDYN deassertion (LNL only).
 * Returns 0 on success or a negative errno on timeout/failure.
 */
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
{
	struct wp_request wp;
	int ret;

	if (IVPU_WA(punit_disabled)) {
		ivpu_dbg(vdev, PM, "Skipping workpoint request\n");
		return 0;
	}

	prepare_wp_request(vdev, &wp, enable);

	ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
		 pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);

	ret = wp_request_send(vdev, &wp);
	if (ret) {
		ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
		return ret;
	}

	ret = wait_for_pll_lock(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for PLL lock\n");
		return ret;
	}

	ret = wait_for_status_ready(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for NPU ready status\n");
		return ret;
	}

	/* Only the power-down path has to wait for CDYN to drop */
	if (!enable) {
		ret = wait_for_cdyn_deassert(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for CDYN deassert\n");
			return ret;
		}
	}

	return 0;
}
375
/*
 * Request D0i3 entry (enable) or exit (disable) on MTL and wait for the
 * transition to complete. Returns 0 on success or a poll-timeout errno.
 */
static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	/* Make sure no D0i3 transition is already in flight */
	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
		return ret;
	}

	/* Request the new power state via the I3 bit */
	val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL);
	if (enable)
		val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
	else
		val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
	REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val);

	/* Wait for the transition to complete */
	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

	return ret;
}
400
401static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable)
402{
403 int ret;
404 u32 val;
405
406 ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
407 if (ret) {
408 ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
409 return ret;
410 }
411
412 val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL);
413 if (enable)
414 val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
415 else
416 val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
417 REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val);
418
419 ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
420 if (ret) {
421 ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
422 return ret;
423 }
424
425 return 0;
426}
427
428static int d0i3_drive(struct ivpu_device *vdev, bool enable)
429{
430 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
431 return d0i3_drive_mtl(vdev, enable);
432 else
433 return d0i3_drive_lnl(vdev, enable);
434}
435
/*
 * Enter D0i3. The 5 us settle delay is applied unconditionally, even when
 * the drive request failed.
 */
int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(punit_disabled))
		return 0;

	ret = d0i3_drive(vdev, true);
	if (ret)
		ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

	udelay(5); /* VPU requires 5 us to complete the transition */

	return ret;
}
451
/* Exit D0i3 (no-op when the punit_disabled workaround is active). */
int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(punit_disabled))
		return 0;

	ret = d0i3_drive(vdev, false);
	if (ret)
		ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

	return ret;
}
465
466int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
467{
468 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
469 return 0;
470
471 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
472}
473
/* Program the default LNL port arbitration weights (regular and ATS paths). */
void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
	REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
}
479
/*
 * Pulse the MTL IP reset: wait for any in-flight reset (TRIGGER set) to
 * finish, set TRIGGER, then poll until hardware clears it again.
 */
static int ip_reset_mtl(struct ivpu_device *vdev)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET);
	val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val);
	REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Timed out waiting for RESET completion\n");

	return ret;
}
501
/*
 * Pulse the LNL IP reset. Clock relinquish is disabled first, then the
 * TRIGGER bit is set and polled until hardware clears it.
 */
static int ip_reset_lnl(struct ivpu_device *vdev)
{
	int ret;
	u32 val;

	ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET);
	val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val);
	REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Timed out waiting for RESET completion\n");

	return ret;
}
525
526int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev)
527{
528 if (IVPU_WA(punit_disabled))
529 return 0;
530
531 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
532 return ip_reset_mtl(vdev);
533 else
534 return ip_reset_lnl(vdev);
535}
536
/*
 * Select the LNL profiling clock: PERF_CLK is cleared for the default
 * profiling frequency and set for any other configured frequency.
 */
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
		val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
	else
		val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);

	REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}
548
/* Log the buttress HM_ATS enable state (debug aid only). */
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev)
{
	ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
		 REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable");
}
554
/* Set DISABLE_CLK_RELINQUISH so the hardware keeps ownership of its clock (LNL). */
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
	REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}
562
/*
 * Report whether the NPU is idle: both READY and IDLE must be set in the
 * generation-specific VPU_STATUS register. Always reports idle when the
 * punit_disabled workaround is active.
 */
bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev)
{
	u32 val;

	if (IVPU_WA(punit_disabled))
		return true;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS);

		return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) &&
		       REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val);
	} else {
		val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

		return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) &&
		       REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val);
	}
}
582
583int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
584{
585 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
586 return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
587 else
588 return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
589}
590
/* Current PLL workpoint as reported by the MTL CURRENT_PLL register. */
static u32 pll_config_get_mtl(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
}
595
/* Current PLL ratio as reported by the LNL PLL_FREQ register. */
static u32 pll_config_get_lnl(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
}
600
/* MTL: DPU clock in Hz runs at 2/3 of the PLL frequency (ratio * 50 MHz). */
static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
{
	return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
}
605
/* LNL: DPU clock in Hz runs at half of the PLL frequency (ratio * 50 MHz). */
static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
{
	return PLL_RATIO_TO_FREQ(ratio) / 2;
}
610
611static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
612{
613 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
614 return pll_ratio_to_dpu_freq_mtl(ratio);
615 else
616 return pll_ratio_to_dpu_freq_lnl(ratio);
617}
618
/* Maximum achievable DPU frequency in Hz, derived from the clamped max PLL ratio. */
u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
{
	return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
}
623
624u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev)
625{
626 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
627 return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev));
628 else
629 return pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev));
630}
631
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	/*
	 * NOTE(review): status is masked with BTRS_MTL_IRQ_MASK, which does
	 * not include FREQ_CHANGE, so this branch cannot fire here -
	 * presumably intentional (the interrupt is masked off); confirm.
	 */
	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
		u32 pll = pll_config_get_mtl(vdev);

		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
			 pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
	}

	/* ATS error: log the HW error log, clear at source, schedule recovery */
	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
		REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* UFI error: decode and log the error log fields, clear, recover */
	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) {
		u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log));
		REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	if (IVPU_WA(interrupt_clear_with_0))
		/*
		 * Writing 1 triggers an interrupt, so we can't perform read update write.
		 * Clear local interrupt status by writing 0 to all bits.
		 */
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
	else
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}
680
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	/* Survivability (DCT) requests are handled from deferred work context */
	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
		ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
		queue_work(system_percpu_wq, &vdev->irq_dct_work);
	}

	/*
	 * NOTE(review): status is masked with BTRS_LNL_IRQ_MASK, which does
	 * not include FREQ_CHANGE, so this branch cannot fire here -
	 * presumably intentional (the interrupt is masked off); confirm.
	 */
	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
		u32 pll = pll_config_get_lnl(vdev);

		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
			 pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
	}

	/* Each error below: log the HW error log, clear at source, schedule recovery */
	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
		REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) {
		ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
		REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) {
		ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
		REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) {
		ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
		REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) {
		ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
		REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}
746
747int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
748{
749 u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
750 u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
751 u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);
752
753 if (cmd != DCT_REQ) {
754 ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
755 return -EBADR;
756 }
757
758 switch (param1) {
759 case DCT_ENABLE:
760 *enable = true;
761 return 0;
762 case DCT_DISABLE:
763 *enable = false;
764 return 0;
765 default:
766 ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
767 return -EINVAL;
768 }
769}
770
/*
 * Report DCT status back to PCODE via the mailbox status register: echoes
 * the DCT_REQ command, the applied enable/disable state and the active
 * duty-cycle percentage.
 */
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent)
{
	u32 val = 0;
	u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;

	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);

	REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
782
783u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
784{
785 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
786 return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET);
787 else
788 return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET);
789}
790
/* Telemetry buffer size from the generation-specific buttress register. */
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE);
}
798
/* Telemetry enable flag from the generation-specific buttress register. */
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE);
}
806
/* Mask the top-level (global) buttress interrupt line (write 1 = masked). */
void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
	else
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
}
814
/* Unmask the top-level (global) buttress interrupt line (write 0 = unmasked). */
void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
	else
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
}
822
/*
 * Enable buttress interrupts: unmask the handled sources in the local
 * mask (bits are active-high masks, hence the inverted IRQ mask), then
 * unmask the global interrupt line.
 */
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK));
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
	} else {
		REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK));
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
	}
}
833
/*
 * Disable buttress interrupts: the global line is masked before all the
 * local sources (the reverse order of ivpu_hw_btrs_irq_enable()).
 */
void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
		REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
	} else {
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
		REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
	}
}
844
/* Dump pending MTL buttress error state (ATS/UFI error logs) for debugging. */
static void diagnose_failure_mtl(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg))
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) {
		u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log));
	}
}
861
862static void diagnose_failure_lnl(struct ivpu_device *vdev)
863{
864 u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
865
866 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) {
867 ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
868 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
869 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
870 }
871
872 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg))
873 ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
874
875 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg))
876 ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
877
878 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg))
879 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
880 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
881 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
882
883 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg))
884 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
885 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
886 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
887
888 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg))
889 ivpu_err(vdev, "Survivability IRQ\n");
890}
891
892void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
893{
894 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
895 return diagnose_failure_mtl(vdev);
896 else
897 return diagnose_failure_lnl(vdev);
898}
899
/* Read the PLATFORM type field from the LNL VPU_STATUS register. */
int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	return REG_GET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PLATFORM, reg);
}