linux.git: drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

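/* Ack the message we have just received from the host by setting the
 * RCV_MSG_ACK bit in the mailbox control register.
 */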
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
        WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

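/* Set or clear TRN_MSG_VALID to tell the host whether the transmit
 * message buffer currently holds a valid message.
 */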
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
        WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID is set
 * by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
        return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

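/* Check whether the receive mailbox holds the expected event; if so, ack it
 * to the host. Returns -ENOENT when a different (or no) message is pending.
 */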
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
                                   enum idh_event event)
{
        u32 reg;

        reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
        if (reg != event)
                return -ENOENT;

        xgpu_nv_mailbox_send_ack(adev);

        return 0;
}

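/* Read back the TRN_MSG_ACK bit from the mailbox control register. */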
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
        return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

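/* Wait up to NV_MAILBOX_POLL_ACK_TIMEDOUT ms for the host to ack the
 * transmitted message. Returns 0 on success, -ETIME on timeout.
 */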
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
        u8 reg;

        do {
                reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
                if (reg & 2)
                        return 0;

                mdelay(5);
                timeout -= 5;
        } while (timeout > 1);

        pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

        return -ETIME;
}

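/* Wait up to NV_MAILBOX_POLL_MSG_TIMEDOUT ms for a specific event from the
 * host. Returns 0 once the event has been received and acked, -ETIME on
 * timeout.
 */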
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
        int r;
        uint64_t timeout, now;

        now = (uint64_t)ktime_to_ms(ktime_get());
        timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

        do {
                r = xgpu_nv_mailbox_rcv_msg(adev, event);
                if (!r)
                        return 0;

                msleep(10);
                now = (uint64_t)ktime_to_ms(ktime_get());
        } while (timeout > now);

        return -ETIME;
}

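/* Transmit a request plus up to three data words to the host and wait for
 * the host's ack.
 */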
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
              enum idh_request req, u32 data1, u32 data2, u32 data3)
{
        int r;
        uint8_t trn;

        /* IMPORTANT:
         * clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; with the
         * host's RCV_MSG_ACK cleared, hw automatically clears the VF's
         * TRN_MSG_ACK. Otherwise the xgpu_nv_poll_ack() below would return
         * immediately.
         */
        do {
                xgpu_nv_mailbox_set_valid(adev, false);
                trn = xgpu_nv_peek_ack(adev);
                if (trn) {
                        pr_err("trn=%x ACK should not be asserted, waiting again\n", trn);
                        msleep(1);
                }
        } while (trn);

        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
        xgpu_nv_mailbox_set_valid(adev, true);

        /* start to poll ack */
        r = xgpu_nv_poll_ack(adev);
        if (r)
                pr_err("Didn't get ack from pf, continuing\n");

        xgpu_nv_mailbox_set_valid(adev, false);
}

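/* Send a request to the host and, for requests that expect a reply, poll for
 * the matching ready event and pick up any data returned in the receive
 * mailbox (init data version, fw reserve checksum key).
 */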
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
                        enum idh_request req, u32 data1, u32 data2, u32 data3)
{
        int r, retry = 1;
        enum idh_event event = -1;

send_request:
        xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

        switch (req) {
        case IDH_REQ_GPU_INIT_ACCESS:
        case IDH_REQ_GPU_FINI_ACCESS:
        case IDH_REQ_GPU_RESET_ACCESS:
                event = IDH_READY_TO_ACCESS_GPU;
                break;
        case IDH_REQ_GPU_INIT_DATA:
                event = IDH_REQ_GPU_INIT_DATA_READY;
                break;
        case IDH_RAS_POISON:
                if (data1 != 0)
                        event = IDH_RAS_POISON_READY;
                break;
        default:
                break;
        }

        if (event != -1) {
                r = xgpu_nv_poll_msg(adev, event);
                if (r) {
                        if (retry++ < 2)
                                goto send_request;

                        if (req != IDH_REQ_GPU_INIT_DATA) {
                                pr_err("Didn't get msg %d from pf, error=%d\n", event, r);
                                return r;
                        } else /* host doesn't support REQ_GPU_INIT_DATA handshake */
                                adev->virt.req_init_data_ver = 0;
                } else {
                        if (req == IDH_REQ_GPU_INIT_DATA) {
                                adev->virt.req_init_data_ver =
                                        RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

                                /* assume V1 in case host doesn't set version number */
                                if (adev->virt.req_init_data_ver < 1)
                                        adev->virt.req_init_data_ver = 1;
                        }
                }

                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                }
        }

        return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
{
        return xgpu_nv_send_access_requests_with_param(adev,
                                                req, 0, 0, 0);
}

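/* Ask the host for a GPU reset, retrying the mailbox handshake up to
 * NV_MAILBOX_POLL_MSG_REP_MAX times.
 */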
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
        int ret, i = 0;

        while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
                ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
                if (!ret)
                        break;
                i++;
        }

        return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
        return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;
        int r = 0;

        req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
        r = xgpu_nv_send_access_requests(adev, req);

        return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
        return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("get ack intr and do nothing.\n");
        return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 2;
        else
                tmp &= ~2;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

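/* Deferred work scheduled from the receive interrupt when the host announces
 * a VF FLR: block concurrent GPU recovery, tell the host the VF is ready to
 * be reset, wait for FLR completion and then trigger recovery if needed.
 */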
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
        int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

        /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
         * otherwise the mailbox msg will be ruined/reset by
         * the VF FLR.
         */
        if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
                return;

        down_write(&adev->reset_domain->sem);

        amdgpu_virt_fini_data_exchange(adev);

        xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

        dev_warn(adev->dev, "timed out waiting for IDH_FLR_NOTIFICATION_CMPL\n");

flr_done:
        atomic_set(&adev->reset_domain->in_gpu_reset, 0);
        up_write(&adev->reset_domain->sem);

        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
                && (!amdgpu_device_has_job_running(adev) ||
                adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
                struct amdgpu_reset_context reset_context;

                memset(&reset_context, 0, sizeof(reset_context));

                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 1;
        else
                tmp &= ~1;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

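/* Mailbox receive interrupt handler: on an FLR notification from the host,
 * schedule flr_work on the reset domain; other events are either handled by
 * the polling path or ignored.
 */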
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

        switch (event) {
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
                        WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
                                   &adev->virt.flr_work),
                                  "Failed to queue work at %s",
                                  __func__);
                break;
                /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
                 * handler can ignore it here since the polling thread will
                 * handle it; other messages such as FLR complete are not
                 * handled here either.
                 */
        case IDH_CLR_MSG_BUF:
        case IDH_FLR_NOTIFICATION_CMPL:
        case IDH_READY_TO_ACCESS_GPU:
        default:
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
        .set = xgpu_nv_set_mailbox_ack_irq,
        .process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
        .set = xgpu_nv_set_mailbox_rcv_irq,
        .process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->virt.ack_irq.num_types = 1;
        adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
        adev->virt.rcv_irq.num_types = 1;
        adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

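/* Register the BIF mailbox receive (src id 135) and ack (src id 138)
 * interrupt sources.
 */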
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        return 0;
}

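/* Enable the mailbox receive and ack interrupts and set up the FLR work
 * handler.
 */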
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;
        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

        return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

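/* Forward a RAS poison consumption event to the host. For UMC IP v12.0.0 and
 * later, data exchange is torn down first and the affected block is passed
 * along with the message.
 */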
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
{
        if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
                xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
        } else {
                amdgpu_virt_fini_data_exchange(adev);
                xgpu_nv_send_access_requests_with_param(adev,
                                        IDH_RAS_POISON, block, 0, 0);
        }
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu   = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu   = xgpu_nv_release_full_gpu_access,
        .req_init_data  = xgpu_nv_request_init_data,
        .reset_gpu = xgpu_nv_request_reset,
        .wait_reset = NULL,
        .trans_msg = xgpu_nv_mailbox_trans_msg,
        .ras_poison_handler = xgpu_nv_ras_poison_handler,
};