// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "vpu_jsm_api.h"

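/* Convert a JSM message type to its enum name for log output. */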
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}

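/* Register doorbell @db_id with the FW and attach it to the given job queue. */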
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}

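/* Read the heartbeat counter of the compute engine, the only engine supported. */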
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

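/*
 * Reset the compute engine. On failure, device recovery is triggered;
 * on success, the engine reset counter is incremented.
 */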
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine, struct vpu_jsm_msg *resp)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
		ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
		return ret;
	}

	atomic_inc(&vdev->pm->engine_reset_counter);

	return 0;
}

int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

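/* Pass a dynamic debug command string to the FW over the general command channel. */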
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}

int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}

int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}

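/*
 * Request D0i3 entry. The message is skipped when the disable_d0i3_msg
 * workaround is active; otherwise wait for the FW response and then for
 * the HW to become idle.
 */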
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
					     VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}

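/* Create a command queue managed by the HW scheduler (HWS). */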
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
		ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
	}

	return ret;
}

int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}

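/* Program the per-priority-band scheduling parameters kept in HW info. */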
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	struct ivpu_hw_info *hw = vdev->hw;
	struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
		&req.payload.hws_priority_band_setup;
	int ret;

	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
		setup->grace_period[band] = hw->hws.grace_period[band];
		setup->process_grace_period[band] = hw->hws.process_grace_period[band];
		setup->process_quantum[band] = hw->hws.process_quantum[band];
	}
	setup->normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}

int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);

	return ret;
}

int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}

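/*
 * Copy out metric streamer data. Returns -EOVERFLOW if the FW reports
 * more bytes written than fit in the supplied buffer.
 */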
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

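/*
 * Query sample size and info size for the given metric groups. The request
 * reuses the metric_streamer_start payload layout.
 */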
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

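/* Enable duty cycle throttling (DCT) with the given active/inactive periods. */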
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
	struct vpu_jsm_msg resp;

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

int ivpu_jsm_state_dump(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_STATE_DUMP_RSP, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

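/* Variant of ivpu_jsm_state_dump() that does not wait for a FW response. */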
int ivpu_jsm_state_dump_no_reply(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };

	return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
				      vdev->timeout.state_dump_msg);
}