// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"

const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}

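/*
 * Register a doorbell for a job queue in OS scheduling mode; the firmware
 * acknowledges with VPU_JSM_MSG_REGISTER_DB_DONE.
 */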
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}

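/* Read the heartbeat counter of an engine; only the compute engine is supported. */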
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

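/*
 * Forward a dynamic debug command string to the firmware; strscpy()
 * truncates the command to VPU_DYNDBG_CMD_MAX_LEN if needed.
 */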
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}

int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}

int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}

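/*
 * Request D0i3 entry. The message is skipped on hardware with the
 * disable_d0i3_msg workaround; on success the HW is additionally
 * waited on until it reports idle.
 */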
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
					     VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}

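/* Create a command queue owned by the firmware's hardware scheduler (HWS). */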
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}

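/*
 * Program the driver's default scheduling parameters for the four HWS
 * priority bands: idle, normal, focus and realtime.
 */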
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	int ret;

	/* Idle */
	req.payload.hws_priority_band_setup.grace_period[0] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
	/* Normal */
	req.payload.hws_priority_band_setup.grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
	/* Focus */
	req.payload.hws_priority_band_setup.grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
	/* Realtime */
	req.payload.hws_priority_band_setup.grace_period[3] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[3] = 200000;

	req.payload.hws_priority_band_setup.normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}

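/* Start the metric streamer for the metric groups selected in metric_group_mask. */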
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
		return ret;
	}

	return ret;
}

int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}

int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EPROTO;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

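/*
 * Duty cycle throttling (DCT) alternates the NPU between active and
 * inactive phases of the given lengths, in microseconds.
 */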
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
	struct vpu_jsm_msg resp;

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

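/* Ask the firmware to dump its state to the FW log; no response payload is expected. */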
int ivpu_jsm_state_dump(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };

	return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
				      vdev->timeout.state_dump_msg);
}