Git Repo - linux.git/blob - drivers/accel/ivpu/ivpu_ipc.c
accel/ivpu: Do not use cons->aborted for job_done_thread
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

#define IPC_MAX_RX_MSG  128
#define IS_KTHREAD()    (get_current()->flags & PF_KTHREAD)

struct ivpu_ipc_tx_buf {
        struct ivpu_ipc_hdr ipc;
        struct vpu_jsm_msg jsm;
};

struct ivpu_ipc_rx_msg {
        struct list_head link;
        struct ivpu_ipc_hdr *ipc_hdr;
        struct vpu_jsm_msg *jsm_msg;
};

static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
                              struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
        ivpu_dbg(vdev, IPC,
                 "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)",
                 c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
                 ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}

static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
                              struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
        u32 *payload = (u32 *)&jsm_msg->payload;

        ivpu_dbg(vdev, JSM,
                 "%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
                 c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
                 jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
                 payload[0], payload[1], payload[2], payload[3], payload[4]);
}

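/*
 * Return RX buffers to the firmware: mark the IPC header (and, if present,
 * the JSM message) as FREE and flush write-combined buffers so the firmware
 * sees the updated statuses.
 */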
static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
                      struct vpu_jsm_msg *jsm_msg)
{
        ipc_hdr->status = IVPU_IPC_HDR_FREE;
        if (jsm_msg)
                jsm_msg->status = VPU_JSM_MSG_FREE;
        wmb(); /* Flush WC buffers for message statuses */
}

static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;

        ivpu_bo_free_internal(ipc->mem_rx);
        ivpu_bo_free_internal(ipc->mem_tx);
}

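/*
 * Allocate a TX slot from the TX gen_pool and fill in the IPC header and
 * JSM message for @req. The slot's VPU address is stored in the consumer
 * so it can be released once the transaction completes.
 */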
static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                    struct vpu_jsm_msg *req)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_tx_buf *tx_buf;
        u32 tx_buf_vpu_addr;
        u32 jsm_vpu_addr;

        tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
        if (!tx_buf_vpu_addr) {
                ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n",
                                     sizeof(*tx_buf));
                return -ENOMEM;
        }

        tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
        if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
                gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
                return -EIO;
        }

        jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

        if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
                ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
                                      tx_buf_vpu_addr);

        if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
                ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
                                      jsm_vpu_addr);

        memset(tx_buf, 0, sizeof(*tx_buf));
        tx_buf->ipc.data_addr = jsm_vpu_addr;
        /* TODO: Set data_size to actual JSM message size, not union of all messages */
        tx_buf->ipc.data_size = sizeof(*req);
        tx_buf->ipc.channel = cons->channel;
        tx_buf->ipc.src_node = 0;
        tx_buf->ipc.dst_node = 1;
        tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
        tx_buf->jsm.type = req->type;
        tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
        tx_buf->jsm.payload = req->payload;

        req->request_id = atomic_inc_return(&ipc->request_id);
        tx_buf->jsm.request_id = req->request_id;
        cons->request_id = req->request_id;
        wmb(); /* Flush WC buffers for IPC, JSM msgs */

        cons->tx_vpu_addr = tx_buf_vpu_addr;

        ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
        ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);

        return 0;
}

static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;

        if (vpu_addr)
                gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
        ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
}

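/*
 * Register a consumer for the given IPC channel. The consumer is added to
 * the global consumer list scanned by the IRQ handler when dispatching
 * incoming messages.
 */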
void
ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;

        INIT_LIST_HEAD(&cons->link);
        cons->channel = channel;
        cons->tx_vpu_addr = 0;
        cons->request_id = 0;
        cons->aborted = false;
        spin_lock_init(&cons->rx_lock);
        INIT_LIST_HEAD(&cons->rx_msg_list);
        init_waitqueue_head(&cons->rx_msg_wq);

        spin_lock_irq(&ipc->cons_list_lock);
        list_add_tail(&cons->link, &ipc->cons_list);
        spin_unlock_irq(&ipc->cons_list_lock);
}

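/*
 * Unregister a consumer: remove it from the consumer list, free any RX
 * messages still queued on it (returning their buffers to the firmware
 * unless the consumer was aborted) and release its pending TX slot.
 */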
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_rx_msg *rx_msg, *r;

        spin_lock_irq(&ipc->cons_list_lock);
        list_del(&cons->link);
        spin_unlock_irq(&ipc->cons_list_lock);

        spin_lock_irq(&cons->rx_lock);
        list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
                list_del(&rx_msg->link);
                if (!cons->aborted)
                        ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
                atomic_dec(&ipc->rx_msg_count);
                kfree(rx_msg);
        }
        spin_unlock_irq(&cons->rx_lock);

        ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}

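/*
 * Send a single JSM request: prepare a TX buffer for @req and write its VPU
 * address to the IPC TX register. Fails with -EAGAIN when IPC is disabled.
 */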
static int
ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        int ret;

        mutex_lock(&ipc->lock);

        if (!ipc->on) {
                ret = -EAGAIN;
                goto unlock;
        }

        ret = ivpu_ipc_tx_prepare(vdev, cons, req);
        if (ret)
                goto unlock;

        ivpu_ipc_tx(vdev, cons->tx_vpu_addr);

unlock:
        mutex_unlock(&ipc->lock);
        return ret;
}

static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
        int ret = 0;

        if (IS_KTHREAD())
                ret |= (kthread_should_stop() || kthread_should_park());

        spin_lock_irq(&cons->rx_lock);
        ret |= !list_empty(&cons->rx_msg_list) || cons->aborted;
        spin_unlock_irq(&cons->rx_lock);

        return ret;
}

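/*
 * Wait up to @timeout_ms for a message on the consumer's RX list and copy it
 * into @ipc_buf / @ipc_payload. Returns 0 on success, -ETIMEDOUT on timeout,
 * -ECANCELED if the consumer was aborted, -EINTR if the calling kthread is
 * stopping and -EBADMSG if the firmware reported an unsuccessful result.
 */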
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                     struct ivpu_ipc_hdr *ipc_buf,
                     struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_rx_msg *rx_msg;
        int wait_ret, ret = 0;

        wait_ret = wait_event_timeout(cons->rx_msg_wq,
                                      ivpu_ipc_rx_need_wakeup(cons),
                                      msecs_to_jiffies(timeout_ms));

        if (IS_KTHREAD() && kthread_should_stop())
                return -EINTR;

        if (wait_ret == 0)
                return -ETIMEDOUT;

        spin_lock_irq(&cons->rx_lock);
        if (cons->aborted) {
                spin_unlock_irq(&cons->rx_lock);
                return -ECANCELED;
        }
        rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
        if (!rx_msg) {
                spin_unlock_irq(&cons->rx_lock);
                return -EAGAIN;
        }
        list_del(&rx_msg->link);
        spin_unlock_irq(&cons->rx_lock);

        if (ipc_buf)
                memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
        if (rx_msg->jsm_msg) {
                u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload));

                if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
                        ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
                        ret = -EBADMSG;
                }

                if (ipc_payload)
                        memcpy(ipc_payload, rx_msg->jsm_msg, size);
        }

        ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
        atomic_dec(&ipc->rx_msg_count);
        kfree(rx_msg);

        return ret;
}

static int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
                               enum vpu_ipc_msg_type expected_resp_type,
                               struct vpu_jsm_msg *resp, u32 channel,
                               unsigned long timeout_ms)
{
        struct ivpu_ipc_consumer cons;
        int ret;

        ivpu_ipc_consumer_add(vdev, &cons, channel);

        ret = ivpu_ipc_send(vdev, &cons, req);
        if (ret) {
                ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
                goto consumer_del;
        }

        ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
        if (ret) {
                ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
                                      ivpu_jsm_msg_type_to_str(req->type), ret);
                goto consumer_del;
        }

        if (resp->type != expected_resp_type) {
                ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
                ret = -EBADE;
        }

consumer_del:
        ivpu_ipc_consumer_del(vdev, &cons);
        return ret;
}

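/*
 * Send @req and wait for a response of @expected_resp type while the device
 * is known to be powered up (the caller holds a runtime PM reference). On
 * timeout, query the engine heartbeat to tell a stuck firmware from a slow
 * response; if the heartbeat also times out, schedule device recovery.
 */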
int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
                                 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
                                 u32 channel, unsigned long timeout_ms)
{
        struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
        struct vpu_jsm_msg hb_resp;
        int ret, hb_ret;

        drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));

        ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
        if (ret != -ETIMEDOUT)
                return ret;

        hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
                                                &hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
                                                vdev->timeout.jsm);
        if (hb_ret == -ETIMEDOUT) {
                ivpu_hw_diagnose_failure(vdev);
                ivpu_pm_schedule_recovery(vdev);
        }

        return ret;
}

int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
                          enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
                          u32 channel, unsigned long timeout_ms)
{
        int ret;

        ret = ivpu_rpm_get(vdev);
        if (ret < 0)
                return ret;

        ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);

        ivpu_rpm_put(vdev);
        return ret;
}

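/*
 * A message matches a consumer when the channels are equal and, for JSM
 * messages, the response carries the request ID of the consumer's last
 * request. Boot messages have no JSM payload and match on channel alone.
 */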
static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                        struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
        if (cons->channel != ipc_hdr->channel)
                return false;

        if (!jsm_msg || jsm_msg->request_id == cons->request_id)
                return true;

        return false;
}

static void
ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                  struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_rx_msg *rx_msg;

        lockdep_assert_held(&ipc->cons_list_lock);
        lockdep_assert_irqs_disabled();

        rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
        if (!rx_msg) {
                ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
                return;
        }

        atomic_inc(&ipc->rx_msg_count);

        rx_msg->ipc_hdr = ipc_hdr;
        rx_msg->jsm_msg = jsm_msg;

        spin_lock(&cons->rx_lock);
        list_add_tail(&rx_msg->link, &cons->rx_msg_list);
        spin_unlock(&cons->rx_lock);

        wake_up(&cons->rx_msg_wq);
}

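/*
 * Drain the IPC FIFO and dispatch each message to the first consumer whose
 * channel and request ID match. Messages that cannot be delivered (RX
 * message budget exceeded or no matching consumer) are returned to the
 * firmware immediately.
 */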
int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_consumer *cons;
        struct ivpu_ipc_hdr *ipc_hdr;
        struct vpu_jsm_msg *jsm_msg;
        unsigned long flags;
        bool dispatched;
        u32 vpu_addr;

        /*
         * The driver needs to purge all messages from the IPC FIFO to clear the
         * IPC interrupt. Unless the FIFO is drained to zero, no further IPC
         * interrupts are generated.
         */
        while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
                vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
                if (vpu_addr == REG_IO_ERROR) {
                        ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
                        return -EIO;
                }

                ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
                if (!ipc_hdr) {
                        ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
                        continue;
                }
                ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);

                jsm_msg = NULL;
                if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
                        jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
                        if (!jsm_msg) {
                                ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
                                                      ipc_hdr->data_addr);
                                ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
                                continue;
                        }
                        ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
                }

                if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
                        ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
                                              IPC_MAX_RX_MSG);
                        ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
                        continue;
                }

                dispatched = false;
                spin_lock_irqsave(&ipc->cons_list_lock, flags);
                list_for_each_entry(cons, &ipc->cons_list, link) {
                        if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
                                ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
                                dispatched = true;
                                break;
                        }
                }
                spin_unlock_irqrestore(&ipc->cons_list_lock, flags);

                if (!dispatched) {
                        ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
                        ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
                }
        }

        return 0;
}

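/*
 * Allocate the write-combined TX and RX buffers shared with the firmware and
 * set up a gen_pool allocator on top of the TX buffer for per-request
 * IPC/JSM message slots.
 */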
int ivpu_ipc_init(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        int ret;

        ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
        if (!ipc->mem_tx) {
                ivpu_err(vdev, "Failed to allocate mem_tx\n");
                return -ENOMEM;
        }

        ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
        if (!ipc->mem_rx) {
                ivpu_err(vdev, "Failed to allocate mem_rx\n");
                ret = -ENOMEM;
                goto err_free_tx;
        }

        ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
                                          -1, "TX_IPC_JSM");
        if (IS_ERR(ipc->mm_tx)) {
                ret = PTR_ERR(ipc->mm_tx);
                ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
                goto err_free_rx;
        }

        ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
        if (ret) {
                ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
                goto err_free_rx;
        }

        INIT_LIST_HEAD(&ipc->cons_list);
        spin_lock_init(&ipc->cons_list_lock);
        drmm_mutex_init(&vdev->drm, &ipc->lock);

        ivpu_ipc_reset(vdev);
        return 0;

err_free_rx:
        ivpu_bo_free_internal(ipc->mem_rx);
err_free_tx:
        ivpu_bo_free_internal(ipc->mem_tx);
        return ret;
}

void ivpu_ipc_fini(struct ivpu_device *vdev)
{
        ivpu_ipc_mem_fini(vdev);
}

void ivpu_ipc_enable(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;

        mutex_lock(&ipc->lock);
        ipc->on = true;
        mutex_unlock(&ipc->lock);
}

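/*
 * Stop accepting new IPC transactions and wake up all waiting consumers.
 * Consumers other than those on the job-done channel (VPU_IPC_CHAN_JOB_RET)
 * are marked as aborted so their pending receives fail with -ECANCELED.
 */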
void ivpu_ipc_disable(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_consumer *cons, *c;
        unsigned long flags;

        mutex_lock(&ipc->lock);
        ipc->on = false;
        mutex_unlock(&ipc->lock);

        spin_lock_irqsave(&ipc->cons_list_lock, flags);
        list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
                if (cons->channel != VPU_IPC_CHAN_JOB_RET) {
                        spin_lock(&cons->rx_lock);
                        cons->aborted = true;
                        spin_unlock(&cons->rx_lock);
                }
                wake_up(&cons->rx_msg_wq);
        }
        spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
}

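/*
 * Clear the shared TX and RX buffers. Must only be called while IPC is
 * disabled, i.e. when no transactions are in flight.
 */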
void ivpu_ipc_reset(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;

        mutex_lock(&ipc->lock);
        drm_WARN_ON(&vdev->drm, ipc->on);

        memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
        memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
        wmb(); /* Flush WC buffers for TX and RX rings */

        mutex_unlock(&ipc->lock);
}