// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"

const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
        #define IVPU_CASE_TO_STR(x) case x: return #x
        switch (type) {
        IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
        IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
        }
        #undef IVPU_CASE_TO_STR

        return "Unknown JSM message type";
}

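/*
 * Register a doorbell with the OS scheduler: tells the firmware where the
 * job queue for db_id lives and waits for VPU_JSM_MSG_REGISTER_DB_DONE.
 */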
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
                         u64 jobq_base, u32 jobq_size)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.register_db.db_idx = db_id;
        req.payload.register_db.jobq_base = jobq_base;
        req.payload.register_db.jobq_size = jobq_size;
        req.payload.register_db.host_ssid = ctx_id;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

        return ret;
}

int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.unregister_db.db_idx = db_id;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

        return ret;
}

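/*
 * Query the heartbeat counter of an engine; only the compute engine is
 * supported. A minimal usage sketch (hypothetical caller, error handling
 * elided):
 *
 *      u64 hb;
 *
 *      if (!ivpu_jsm_get_heartbeat(vdev, VPU_ENGINE_COMPUTE, &hb))
 *              ... hb now holds the firmware heartbeat counter ...
 */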
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
        struct vpu_jsm_msg resp;
        int ret;

        if (engine != VPU_ENGINE_COMPUTE)
                return -EINVAL;

        req.payload.query_engine_hb.engine_idx = engine;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret) {
                ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
                                     engine, ret);
                return ret;
        }

        *heartbeat = resp.payload.query_engine_hb_done.heartbeat;
        return ret;
}

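/* Reset the compute engine and wait for VPU_JSM_MSG_ENGINE_RESET_DONE. */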
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
        struct vpu_jsm_msg resp;
        int ret;

        if (engine != VPU_ENGINE_COMPUTE)
                return -EINVAL;

        req.payload.engine_reset.engine_idx = engine;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

        return ret;
}

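/*
 * Request preemption of the jobs currently running on the compute engine;
 * preempt_id tags the request in the payload.
 */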
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
        struct vpu_jsm_msg resp;
        int ret;

        if (engine != VPU_ENGINE_COMPUTE)
                return -EINVAL;

        req.payload.engine_preempt.engine_idx = engine;
        req.payload.engine_preempt.preempt_id = preempt_id;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

        return ret;
}

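/*
 * Forward a dynamic debug command string to the firmware on the general
 * command channel; the string is truncated to VPU_DYNDBG_CMD_MAX_LEN.
 */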
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
        struct vpu_jsm_msg resp;
        int ret;

        strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
                                    VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
                                      command, ret);

        return ret;
}

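/* Query the trace destinations and HW components the firmware supports. */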
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
                                  u64 *trace_hw_component_mask)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
        struct vpu_jsm_msg resp;
        int ret;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret) {
                ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
                return ret;
        }

        *trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
        *trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

        return ret;
}

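/* Apply trace level, destination mask and HW component mask in one request. */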
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
                              u64 trace_hw_component_mask)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.trace_config.trace_level = trace_level;
        req.payload.trace_config.trace_destination_mask = trace_destination_mask;
        req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to set trace config: %d\n", ret);

        return ret;
}

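/* Ask the firmware to release all resources tied to the given host SSID. */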
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.ssid_release.host_ssid = host_ssid;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

        return ret;
}

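/*
 * Prepare the firmware for D0i3 entry: request an explicit response, then
 * wait for the hardware to become idle. A no-op when the disable_d0i3_msg
 * workaround is active.
 */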
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
        struct vpu_jsm_msg resp;
        int ret;

        if (IVPU_WA(disable_d0i3_msg))
                return 0;

        req.payload.pwr_d0i3_enter.send_response = 1;

        ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
                                             VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
        if (ret)
                return ret;

        return ivpu_hw_wait_for_idle(vdev);
}

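/*
 * Create a command queue managed by the hardware scheduler (HWS) for the
 * given context, process and engine.
 */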
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
                             u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.hws_create_cmdq.host_ssid = ctx_id;
        req.payload.hws_create_cmdq.process_id = pid;
        req.payload.hws_create_cmdq.engine_idx = engine;
        req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
        req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
        req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
        req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

        return ret;
}

int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
        req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

        return ret;
}

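/*
 * HWS variant of doorbell registration. Note that the firmware acknowledges
 * it with the same VPU_JSM_MSG_REGISTER_DB_DONE message as the OS-scheduler
 * path.
 */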
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
                             u64 cmdq_base, u32 cmdq_size)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.hws_register_db.db_id = db_id;
        req.payload.hws_register_db.host_ssid = ctx_id;
        req.payload.hws_register_db.cmdq_id = cmdq_id;
        req.payload.hws_register_db.cmdq_base = cmdq_base;
        req.payload.hws_register_db.cmdq_size = cmdq_size;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

        return ret;
}

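/* Resume the compute engine under the hardware scheduler. */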
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
        struct vpu_jsm_msg resp;
        int ret;

        if (engine != VPU_ENGINE_COMPUTE)
                return -EINVAL;

        req.payload.hws_resume_engine.engine_idx = engine;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);

        return ret;
}

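/*
 * Set scheduling properties for a command queue. Only the priority band is
 * caller-controlled; the quantum and grace periods are fixed here.
 */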
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
                                              u32 priority)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
        req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
        req.payload.hws_set_context_sched_properties.priority_band = priority;
        req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
        req.payload.hws_set_context_sched_properties.in_process_priority = 0;
        req.payload.hws_set_context_sched_properties.context_quantum = 20000;
        req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
        req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

        return ret;
}

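/* Point the hardware scheduler at a log buffer in VPU address space. */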
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
                                    u64 vpu_log_buffer_va)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
        req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
        req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
        req.payload.hws_set_scheduling_log.notify_index = 0;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

        return ret;
}

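/*
 * Program the four scheduling priority bands (idle, normal, focus, realtime)
 * with their grace periods and process quanta.
 */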
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
        struct vpu_jsm_msg resp;
        int ret;

        /* Idle */
        req.payload.hws_priority_band_setup.grace_period[0] = 0;
        req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
        req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
        /* Normal */
        req.payload.hws_priority_band_setup.grace_period[1] = 50000;
        req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
        req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
        /* Focus */
        req.payload.hws_priority_band_setup.grace_period[2] = 50000;
        req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
        req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
        /* Realtime */
        req.payload.hws_priority_band_setup.grace_period[3] = 0;
        req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
        req.payload.hws_priority_band_setup.process_quantum[3] = 200000;

        req.payload.hws_priority_band_setup.normal_band_percentage = 10;

        ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
                                             &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

        return ret;
}

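/* Start the metric streamer for the groups selected in metric_group_mask. */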
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
                                   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
        req.payload.metric_streamer_start.sampling_rate = sampling_rate;
        req.payload.metric_streamer_start.buffer_addr = buffer_addr;
        req.payload.metric_streamer_start.buffer_size = buffer_size;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);

        return ret;
}

int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

        return ret;
}

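/*
 * Flush streamed metric data into the supplied buffer. A response claiming
 * more bytes written than the buffer holds is rejected with -EOVERFLOW.
 */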
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
                                    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
        req.payload.metric_streamer_update.buffer_addr = buffer_addr;
        req.payload.metric_streamer_update.buffer_size = buffer_size;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret) {
                ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
                return ret;
        }

        if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
                ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
                                      resp.payload.metric_streamer_done.bytes_written, buffer_size);
                return -EOVERFLOW;
        }

        *bytes_written = resp.payload.metric_streamer_done.bytes_written;

        return ret;
}

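/*
 * Query sample size and info size for the selected metric groups. The
 * request reuses the metric_streamer_start payload layout.
 */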
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
                                  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
        struct vpu_jsm_msg resp;
        int ret;

        req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
        req.payload.metric_streamer_start.buffer_addr = buffer_addr;
        req.payload.metric_streamer_start.buffer_size = buffer_size;

        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
        if (ret) {
                ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
                return ret;
        }

        if (!resp.payload.metric_streamer_done.sample_size) {
                ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
                return -EBADMSG;
        }

        if (sample_size)
                *sample_size = resp.payload.metric_streamer_done.sample_size;
        if (info_size)
                *info_size = resp.payload.metric_streamer_done.bytes_written;

        return ret;
}

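/*
 * Enable duty cycle throttling (DCT) with the given active and inactive
 * times in microseconds. A hypothetical example: a 50% duty cycle over a
 * 20 ms window would be ivpu_jsm_dct_enable(vdev, 10000, 10000).
 */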
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
        struct vpu_jsm_msg resp;

        req.payload.pwr_dct_control.dct_active_us = active_us;
        req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

        return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
                                              VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
        struct vpu_jsm_msg resp;

        return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
                                              VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

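/*
 * Trigger a firmware state dump. Unlike the other helpers this only sends
 * the request; no response message is collected here.
 */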
int ivpu_jsm_state_dump(struct ivpu_device *vdev)
{
        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };

        return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
                                      vdev->timeout.state_dump_msg);
}