2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "vega10/soc15ip.h"
26 #include "vega10/NBIO/nbio_6_1_offset.h"
27 #include "vega10/NBIO/nbio_6_1_sh_mask.h"
28 #include "vega10/GC/gc_9_0_offset.h"
29 #include "vega10/GC/gc_9_0_sh_mask.h"
31 #include "soc15_common.h"
/*
 * Acknowledge a message received from the host (PF) mailbox.
 *
 * Visible flow: set RCV_MSG_ACK in BIF_BX_PF0_MAILBOX_CONTROL, then
 * re-read the control register until RCV_MSG_VALID drops (see in-line
 * comment below), bounded by AI_MAILBOX_TIMEDOUT; an error is logged
 * if it never clears.
 *
 * NOTE(review): this excerpt is missing interleaved lines (braces, the
 * poll-loop body, the local 'reg' declaration); only the statements
 * shown are documented.
 */
34 static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
37 int timeout = AI_MAILBOX_TIMEDOUT;
38 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
/* Tell the PF we consumed its message by raising RCV_MSG_ACK. */
40 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
41 mmBIF_BX_PF0_MAILBOX_CONTROL));
42 reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
43 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
44 mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
46 /*Wait for RCV_MSG_VALID to be 0*/
47 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
48 mmBIF_BX_PF0_MAILBOX_CONTROL));
/* Timeout path: the host never de-asserted RCV_MSG_VALID. */
51 pr_err("RCV_MSG_VALID is not cleared\n");
/* Re-sample the control register for the next loop iteration. */
57 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
58 mmBIF_BX_PF0_MAILBOX_CONTROL));
/*
 * Drive the TRN_MSG_VALID bit in BIF_BX_PF0_MAILBOX_CONTROL:
 * asserted (val == true) after a message is written so the PF sees it,
 * cleared (val == false) once the handshake completes.
 *
 * NOTE(review): the local 'reg' declaration, trailing argument and
 * closing brace are not visible in this excerpt.
 */
62 static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
/* Read-modify-write so the other control bits are preserved. */
66 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
67 mmBIF_BX_PF0_MAILBOX_CONTROL));
68 reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
69 TRN_MSG_VALID, val ? 1 : 0);
70 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
/*
 * Write a request message into MSGBUF_TRN_DW0 and mark it valid so the
 * host (PF) will pick it up.
 *
 * NOTE(review): the second parameter line (the request id written into
 * the MSGBUF field) and the REG_SET_FIELD value argument are not
 * visible in this excerpt — inferred from the call site in
 * xgpu_ai_send_access_requests(); confirm against the full file.
 */
74 static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
79 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
80 mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
81 reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
83 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
/* Hand the message to the PF by raising TRN_MSG_VALID. */
86 xgpu_ai_mailbox_set_valid(adev, true);
/*
 * Check the receive mailbox for a specific event from the PF.
 * Returns 0 on match; the non-zero return paths are in lines not
 * visible in this excerpt.
 *
 * For ordinary events the RCV_MSG_VALID bit is checked first; the
 * FLR-completion notification is exempt from that check — presumably
 * because it may arrive without the valid bit set; TODO confirm.
 */
89 static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
93 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
95 if (event != IDH_FLR_NOTIFICATION_CMPL) {
96 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
97 mmBIF_BX_PF0_MAILBOX_CONTROL));
/* Read the payload; the compare against 'event' is in code not shown. */
102 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
103 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
/* Expected message seen: complete the handshake with the PF. */
107 xgpu_ai_mailbox_send_ack(adev);
/*
 * Poll BIF_BX_PF0_MAILBOX_CONTROL until the PF acknowledges our
 * transmitted message (TRN_MSG_ACK set), bounded by
 * AI_MAILBOX_TIMEDOUT. Returns 0 on ack; the timeout error-return and
 * delay/decrement statements are in lines not visible in this excerpt.
 */
112 static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
114 int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
115 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
118 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
119 mmBIF_BX_PF0_MAILBOX_CONTROL));
120 while (!(reg & mask)) {
/* Timeout path: the PF never set TRN_MSG_ACK. */
122 pr_err("Doesn't get ack from pf.\n");
/* Re-sample the control register for the next iteration. */
129 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
130 mmBIF_BX_PF0_MAILBOX_CONTROL));
/*
 * Poll for a specific event from the PF, retrying
 * xgpu_ai_mailbox_rcv_msg() until it succeeds or AI_MAILBOX_TIMEDOUT
 * elapses (retry-loop body not visible in this excerpt).
 *
 * NOTE(review): two apparent copy-paste leftovers from the VI variant —
 * the function is named xgpu_vi_poll_msg inside this AI-specific file,
 * and the error string says "ack" although this path polls for a
 * message, making its log output indistinguishable from
 * xgpu_ai_poll_ack(). Worth fixing (message text and, with a caller
 * update, the name) in a follow-up.
 */
136 static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
138 int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
140 r = xgpu_ai_mailbox_rcv_msg(adev, event);
/* Timeout path inside the (not-shown) retry loop. */
143 pr_err("Doesn't get ack from pf.\n");
/* Retry: the expected message has not arrived yet. */
150 r = xgpu_ai_mailbox_rcv_msg(adev, event);
/*
 * Send a request to the host (PF) and wait for the corresponding ack;
 * for GPU init/fini/reset access requests, additionally wait for the
 * READY_TO_ACCESS_GPU reply message before returning.
 */
157 static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
158 enum idh_request req)
162 xgpu_ai_mailbox_trans_msg(adev, req);
164 /* start to poll ack */
165 r = xgpu_ai_poll_ack(adev);
/* Handshake done (or timed out): drop TRN_MSG_VALID again. */
169 xgpu_ai_mailbox_set_valid(adev, false);
171 /* start to check msg if request is idh_req_gpu_init_access */
172 if (req == IDH_REQ_GPU_INIT_ACCESS ||
173 req == IDH_REQ_GPU_FINI_ACCESS ||
174 req == IDH_REQ_GPU_RESET_ACCESS) {
175 r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
/*
 * Request exclusive ("full") GPU access from the host: INIT access
 * when init is true, FINI access otherwise. Wired into
 * amdgpu_virt_ops as .req_full_gpu.
 *
 * NOTE(review): the 'bool init' parameter line is not visible in this
 * excerpt; inferred from the req selection below — confirm against the
 * full file.
 */
183 static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
186 enum idh_request req;
188 req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
189 return xgpu_ai_send_access_requests(adev, req);
/*
 * Release previously requested full GPU access back to the host:
 * REL_GPU_INIT when init is true, REL_GPU_FINI otherwise. Wired into
 * amdgpu_virt_ops as .rel_full_gpu.
 *
 * NOTE(review): the 'bool init' parameter, the declaration of 'r' and
 * the return statement are not visible in this excerpt.
 */
192 static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
195 enum idh_request req;
198 req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
199 r = xgpu_ai_send_access_requests(adev, req);
204 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
205 .req_full_gpu = xgpu_ai_request_full_gpu_access,
206 .rel_full_gpu = xgpu_ai_release_full_gpu_access,