/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

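/*
 * The mailbox helpers below poke BIF_BX_PF_MAILBOX_CONTROL one byte at a
 * time: in the TRN byte, bit 0 is TRN_MSG_VALID and bit 1 is TRN_MSG_ACK
 * (set by the host); writing bit 1 of the RCV byte acks a message received
 * from the host.
 */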
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
        WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
        WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL is
 * guaranteed to have already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg cannot guarantee a correct
 * value, since RCV_DW0 only holds a valid message while the host has
 * RCV_MSG_VALID set.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
        return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
                                   enum idh_event event)
{
        u32 reg;

        reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
        if (reg != event)
                return -ENOENT;

        xgpu_nv_mailbox_send_ack(adev);

        return 0;
}

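/*
 * Bit 1 of the TRN byte is TRN_MSG_ACK; a non-zero return means the host's
 * ack is (still) asserted.
 */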
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
        return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
        u8 reg;

        do {
                reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
                if (reg & 2)
                        return 0;

                mdelay(5);
                timeout -= 5;
        } while (timeout > 1);

        pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

        return -ETIME;
}

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
        int r;
        uint64_t timeout, now;

        now = (uint64_t)ktime_to_ms(ktime_get());
        timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

        do {
                r = xgpu_nv_mailbox_rcv_msg(adev, event);
                if (!r)
                        return 0;

                msleep(10);
                now = (uint64_t)ktime_to_ms(ktime_get());
        } while (timeout > now);

        return -ETIME;
}

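/*
 * Send one request to the host: wait for any stale TRN_MSG_ACK to drop,
 * write the request and payload into TRN_DW0..DW3, raise TRN_MSG_VALID,
 * then poll for the host's ack and lower VALID again.
 */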
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
              enum idh_request req, u32 data1, u32 data2, u32 data3)
{
        int r;
        uint8_t trn;

        /* IMPORTANT:
         * Clear TRN_MSG_VALID first: with it cleared, the hardware
         * automatically clears the host's RCV_MSG_ACK, which in turn clears
         * the VF's TRN_MSG_ACK.  Otherwise the xgpu_nv_poll_ack() below
         * would return immediately on a stale ack.
         */
        do {
                xgpu_nv_mailbox_set_valid(adev, false);
                trn = xgpu_nv_peek_ack(adev);
                if (trn) {
                        pr_err("trn=%x ACK should not assert! wait again!\n", trn);
                        msleep(1);
                }
        } while (trn);

        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
        xgpu_nv_mailbox_set_valid(adev, true);

        /* start to poll ack */
        r = xgpu_nv_poll_ack(adev);
        if (r)
                pr_err("Didn't get ack from pf, continuing anyway\n");

        xgpu_nv_mailbox_set_valid(adev, false);
}

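/*
 * Issue a request and, for requests that expect a reply, poll for the
 * matching event (retrying the send once).  A successful INIT_DATA request
 * also latches the handshake version from RCV_DW1; INIT/RESET access
 * requests latch the fw_reserve checksum key from RCV_DW2.
 */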
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
{
        int r, retry = 1;
        enum idh_event event = -1;

send_request:
        xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

        switch (req) {
        case IDH_REQ_GPU_INIT_ACCESS:
        case IDH_REQ_GPU_FINI_ACCESS:
        case IDH_REQ_GPU_RESET_ACCESS:
                event = IDH_READY_TO_ACCESS_GPU;
                break;
        case IDH_REQ_GPU_INIT_DATA:
                event = IDH_REQ_GPU_INIT_DATA_READY;
                break;
        default:
                break;
        }

        if (event != -1) {
                r = xgpu_nv_poll_msg(adev, event);
                if (r) {
                        if (retry++ < 2)
                                goto send_request;

                        if (req != IDH_REQ_GPU_INIT_DATA) {
                                pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);
                                return r;
                        }

                        /* host doesn't support REQ_GPU_INIT_DATA handshake */
                        adev->virt.req_init_data_ver = 0;
                } else {
                        if (req == IDH_REQ_GPU_INIT_DATA) {
                                adev->virt.req_init_data_ver =
                                        RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

                                /* assume V1 in case host doesn't set version number */
                                if (adev->virt.req_init_data_ver < 1)
                                        adev->virt.req_init_data_ver = 1;
                        }
                }

                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                }
        }

        return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
        int ret, i = 0;

        while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
                ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
                if (!ret)
                        break;
                i++;
        }

        return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
        return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
        return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
        return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("Got ack interrupt; nothing to do.\n");
        return 0;
}

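/*
 * In MAILBOX_INT_CNTL, bit 0 enables the message-valid (rcv) interrupt and
 * bit 1 enables the ack interrupt, as programmed by the two set-irq
 * callbacks below.
 */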
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 2;
        else
                tmp &= ~2;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

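/*
 * Handle a function-level reset (FLR) announced by the host: take the
 * reset domain lock so amdgpu_device_gpu_recover() is held off, stop
 * VF<->PF data exchange, tell the host we are ready, then wait for
 * IDH_FLR_NOTIFICATION_CMPL before optionally kicking off recovery.
 */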
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
        int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

        /* Block amdgpu_device_gpu_recover() until the FLR_NOTIFICATION_CMPL
         * message is received; otherwise the mailbox contents would be
         * clobbered/reset by the VF FLR.
         */
        if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
                return;

        down_write(&adev->reset_domain->sem);

        amdgpu_virt_fini_data_exchange(adev);

        xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

flr_done:
        atomic_set(&adev->reset_domain->in_gpu_reset, 0);
        up_write(&adev->reset_domain->sem);

        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev) &&
            (!amdgpu_device_has_job_running(adev) ||
             adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
                struct amdgpu_reset_context reset_context;

                memset(&reset_context, 0, sizeof(reset_context));

                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 1;
        else
                tmp &= ~1;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

        switch (event) {
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
                        WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
                                   &adev->virt.flr_work),
                                  "Failed to queue work at %s!",
                                  __func__);
                break;
                /* READY_TO_ACCESS_GPU is fetched by the kernel's polling
                 * path, so the IRQ handler can ignore it here; other
                 * messages, such as FLR complete, are likewise not handled
                 * here.
                 */
        case IDH_CLR_MSG_BUF:
        case IDH_FLR_NOTIFICATION_CMPL:
        case IDH_READY_TO_ACCESS_GPU:
        default:
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
        .set = xgpu_nv_set_mailbox_ack_irq,
        .process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
        .set = xgpu_nv_set_mailbox_rcv_irq,
        .process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->virt.ack_irq.num_types = 1;
        adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
        adev->virt.rcv_irq.num_types = 1;
        adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

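/*
 * BIF interrupt source IDs for the host mailbox, per the hookup below:
 * 135 raises the message (rcv) interrupt, 138 the ack interrupt.
 */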
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;

        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

        return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
{
        xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu           = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu           = xgpu_nv_release_full_gpu_access,
        .req_init_data          = xgpu_nv_request_init_data,
        .reset_gpu              = xgpu_nv_request_reset,
        .wait_reset             = NULL,
        .trans_msg              = xgpu_nv_mailbox_trans_msg,
        .ras_poison_handler     = xgpu_nv_ras_poison_handler,
};