/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"
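
/*
 * The VF talks to the PF through the BIF_BX_PF0 mailbox: requests go out
 * via the MSGBUF_TRN_DW0..DW3 registers plus the TRN_MSG_VALID bit, and
 * replies come back via MSGBUF_RCV_DW0..DW3 plus RCV_MSG_VALID/RCV_MSG_ACK.
 * All register accesses below use the *_NO_KIQ variants so the mailbox
 * keeps working while the KIQ ring is unavailable (e.g. during reset).
 */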
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* Wait for RCV_MSG_VALID to be cleared by the PF */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}
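
/* Raise or drop TRN_MSG_VALID, which tells the PF whether the contents of
 * the MSGBUF_TRN_DW* registers are a live request. */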
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
		      reg);
}
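
/* Check whether the message currently in the receive buffer matches the
 * expected event, and ack it if so.  FLR completion is special-cased: it
 * is checked without first testing RCV_MSG_VALID. */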
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);
	return 0;
}
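
/* Busy-wait, in 5 ms steps, for the PF to ack our last transmitted
 * message; gives up after AI_MAILBOX_TIMEDOUT. */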
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Didn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}
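
/* Poll until a specific event arrives from the PF (acking it on receipt),
 * again bounded by AI_MAILBOX_TIMEDOUT. */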
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;

	r = xgpu_ai_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Didn't get msg:%d from pf.\n", event);
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
	}

	return r;
}
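
/* Transmit a request (plus up to three data words) to the PF: fill the
 * TRN_DW registers, raise TRN_MSG_VALID, wait for the ack, then drop the
 * valid bit again. */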
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}
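
/* Send an access request to the PF.  For the INIT/FINI/RESET access
 * requests the PF replies with READY_TO_ACCESS_GPU, which must arrive
 * before the VF may touch the hardware; on a successful INIT request the
 * PF also publishes the firmware-reserve checksum key in RCV_DW2. */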
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is idh_req_gpu_init_access */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}
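
/* The three thin wrappers below are the request paths exposed through
 * the amdgpu_virt_ops table at the bottom of this file. */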
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}
static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}
static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}
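
/* Enable or disable the interrupt raised when the PF acks a transmitted
 * message (TRN_MSG_ACK). */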
static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}
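
/* Deferred FLR (function level reset) handler: wait for the hypervisor to
 * report completion of the reset, then trigger recovery of the VF. */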
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG becomes IDH_FLR_NOTIFICATION_CMPL (3) */
	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	amdgpu_sriov_gpu_reset(adev, NULL);
}
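
/* Enable or disable the interrupt raised when the PF posts a new message
 * (RCV_MSG_VALID). */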
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}
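
/* RCV_MSG_VALID interrupt handler: when the driver's own TDR is disabled,
 * an FLR notification from the hypervisor schedules the FLR work item. */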
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR disabled */
	if (amdgpu_lockup_timeout == 0) {
		/* see what event we get */
		r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY now */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};
static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};
void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}
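
/* Register the two mailbox interrupts with the IH: BIF src id 135 feeds
 * rcv_irq (message valid) and src id 138 feeds ack_irq. */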
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
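
/* Enable both mailbox interrupts and set up the FLR work item. */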
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}
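
/* Drop the interrupt references taken in xgpu_ai_mailbox_get_irq(). */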
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
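
/* Hooks used by the common amdgpu SR-IOV virtualization code. */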
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};