/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

#include "amdgpu_reset.h"

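/*
 * Mailbox register layout, as inferred from the accessors below (a reading
 * aid, not an authoritative hardware description): the VF<->PF mailbox lives
 * in the BIF_BX_PF0_MAILBOX_* register block.  The byte-wide control offsets
 * AI_MAIBOX_CONTROL_TRN/RCV_OFFSET_BYTE (defined in mxgpu_ai.h) each carry a
 * MSG_VALID flag in bit 0 and a MSG_ACK flag in bit 1, while the
 * MSGBUF_TRN/RCV_DW0..DW3 registers carry the message ID plus up to three
 * data words.
 */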
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
        WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
        WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID
 * has been set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
        return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
                                   enum idh_event event)
{
        u32 reg;

        reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                             mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
        if (reg != event)
                return -ENOENT;

        xgpu_ai_mailbox_send_ack(adev);

        return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
        return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
        int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
        u8 reg;

        do {
                reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
                if (reg & 2)
                        return 0;

                mdelay(5);
                timeout -= 5;
        } while (timeout > 1);

        pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

        return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
        int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

        do {
                r = xgpu_ai_mailbox_rcv_msg(adev, event);
                if (!r)
                        return 0;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

        pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

        return -ETIME;
}

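/*
 * VF->PF transmit handshake implemented below (a summary of the code that
 * follows, for orientation):
 *
 *   1. drop TRN_MSG_VALID and wait for any stale TRN_MSG_ACK to clear
 *   2. write the message ID into TRN_DW0 and the payload into TRN_DW1..DW3
 *   3. raise TRN_MSG_VALID to hand the message to the host
 *   4. poll for TRN_MSG_ACK, then drop TRN_MSG_VALID again
 */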
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
                                      enum idh_request req, u32 data1,
                                      u32 data2, u32 data3)
{
        u32 reg;
        int r;
        uint8_t trn;

        /* IMPORTANT:
         * Clear TRN_MSG_VALID to make the host clear its RCV_MSG_ACK; once
         * the host's RCV_MSG_ACK is cleared, the hardware clears the VF's
         * TRN_MSG_ACK as well.  Otherwise xgpu_ai_poll_ack() below would
         * return immediately on a stale ack.
         */
        do {
                xgpu_ai_mailbox_set_valid(adev, false);
                trn = xgpu_ai_peek_ack(adev);
                if (trn) {
                        pr_err("trn=%x ACK should not assert! wait again!\n", trn);
                        msleep(1);
                }
        } while (trn);

        reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                             mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
        reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
                            MSGBUF_DATA, req);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
                      reg);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
                      data1);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
                      data2);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
                      data3);

        xgpu_ai_mailbox_set_valid(adev, true);

        /* start to poll ack */
        r = xgpu_ai_poll_ack(adev);
        if (r)
                pr_err("Didn't get ack from pf, continuing anyway\n");

        xgpu_ai_mailbox_set_valid(adev, false);
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
{
        int r;

        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

        /* start to check msg if request is idh_req_gpu_init_access */
        if (req == IDH_REQ_GPU_INIT_ACCESS ||
            req == IDH_REQ_GPU_FINI_ACCESS ||
            req == IDH_REQ_GPU_RESET_ACCESS) {
                r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
                if (r) {
                        pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
                        return r;
                }
                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
                                RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                        mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
                }
        } else if (req == IDH_REQ_GPU_INIT_DATA) {
                /* Dummy REQ_GPU_INIT_DATA handling */
                r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
                /* version set to 0 since dummy */
                adev->virt.req_init_data_ver = 0;
        }

        return 0;
}

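/*
 * A minimal sketch of how these requests are driven in practice: the generic
 * SR-IOV layer brackets exclusive-access windows through the xgpu_ai_virt_ops
 * table at the bottom of this file, roughly:
 *
 *      amdgpu_virt_request_full_gpu(adev, true);  // -> IDH_REQ_GPU_INIT_ACCESS
 *      ... program privileged registers ...
 *      amdgpu_virt_release_full_gpu(adev, true);  // -> IDH_REL_GPU_INIT_ACCESS
 *
 * (amdgpu_virt_request_full_gpu()/amdgpu_virt_release_full_gpu() live in
 * amdgpu_virt.c; the exact call sites are outside this file.)
 */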
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
        int ret, i = 0;

        while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
                ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
                if (!ret)
                        break;
                i++;
        }

        return ret;
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
        return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
        return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("get ack intr and do nothing.\n");
        return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

        tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
                            (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

        return 0;
}

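/*
 * Function-level-reset (FLR) flow as implemented in this file: the host
 * signals IDH_FLR_NOTIFICATION, xgpu_ai_mailbox_rcv_irq() schedules this
 * worker, the worker replies IDH_READY_TO_RESET and then polls for
 * IDH_FLR_NOTIFICATION_CMPL before (optionally) kicking GPU recovery.
 */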
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
        int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

        /* block amdgpu_gpu_recover till msg FLR COMPLETE is received,
         * otherwise the mailbox msg will be ruined/reset by
         * the VF FLR.
         */
        if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
                return;

        down_write(&adev->reset_domain->sem);

        amdgpu_virt_fini_data_exchange(adev);

        xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

        do {
                if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

        dev_warn(adev->dev, "timed out waiting for IDH_FLR_NOTIFICATION_CMPL\n");

flr_done:
        atomic_set(&adev->reset_domain->in_gpu_reset, 0);
        up_write(&adev->reset_domain->sem);

        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev) &&
            (!amdgpu_device_has_job_running(adev) ||
             adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
                struct amdgpu_reset_context reset_context;

                memset(&reset_context, 0, sizeof(reset_context));

                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

        tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
                            (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

        return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

        switch (event) {
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
                        WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
                                                                &adev->virt.flr_work),
                                  "Failed to queue work! at %s",
                                  __func__);
                break;
        case IDH_QUERY_ALIVE:
                xgpu_ai_mailbox_send_ack(adev);
                break;
        /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
         * ignore it here since the polling thread will handle it; other
         * messages such as FLR complete are not handled here either.
         */
        case IDH_CLR_MSG_BUF:
        case IDH_FLR_NOTIFICATION_CMPL:
        case IDH_READY_TO_ACCESS_GPU:
        default:
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
        .set = xgpu_ai_set_mailbox_ack_irq,
        .process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
        .set = xgpu_ai_set_mailbox_rcv_irq,
        .process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->virt.ack_irq.num_types = 1;
        adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
        adev->virt.rcv_irq.num_types = 1;
        adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;
        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

        return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
{
        return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev,
                                       enum amdgpu_ras_block block)
{
        xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}

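/*
 * Dispatch table consumed by the generic SR-IOV virt layer.  The SoC init
 * path is expected to install it (e.g. adev->virt.ops = &xgpu_ai_virt_ops
 * in soc15.c - an assumption about the caller, not visible in this file).
 */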
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
        .req_full_gpu           = xgpu_ai_request_full_gpu_access,
        .rel_full_gpu           = xgpu_ai_release_full_gpu_access,
        .reset_gpu              = xgpu_ai_request_reset,
        .wait_reset             = NULL,
        .trans_msg              = xgpu_ai_mailbox_trans_msg,
        .req_init_data          = xgpu_ai_request_init_data,
        .ras_poison_handler     = xgpu_ai_ras_poison_handler,
};