/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Use SCRATCH_REG0 as the test
	 * register.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
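
/*
 * Illustrative sketch only (not part of the original file): an SR-IOV
 * init path could use the check above to fail probing cleanly when the
 * hypervisor has blocked MMIO, e.g.
 *
 *	if (amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev)) {
 *		DRM_ERROR("MMIO is blocked by the hypervisor\n");
 *		return -EACCES;
 *	}
 *
 * The exact call site and error code here are assumptions for
 * illustration.
 */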
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
}
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't keep waiting in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever: e.g. this virt_kiq_rreg
	 * can be triggered from TTM, and ttm_bo_lock_delayed_workqueue()
	 * will never return if we keep waiting here, which makes
	 * gpu_recover() hang.
	 *
	 * Also don't keep waiting when called from IRQ context.
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
	pr_err("failed to read reg:%x\n", reg);
	return ~0;
}
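
/*
 * Illustrative sketch only: under SR-IOV with runtime access in effect,
 * the driver's generic register accessors are expected to route MMIO
 * reads through the KIQ helper above instead of touching the blocked
 * MMIO aperture, along the lines of
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		return amdgpu_virt_kiq_rreg(adev, reg);
 *	return readl(((void __iomem *)adev->rmmio) + (reg * 4));
 *
 * The real dispatch lives in the core accessor code, not in this file;
 * the snippet is a simplified assumption of that flow.
 */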
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't keep waiting in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever: e.g. this virt_kiq_wreg
	 * can be triggered from TTM, and ttm_bo_lock_delayed_workqueue()
	 * will never return if we keep waiting here, which makes
	 * gpu_recover() hang.
	 *
	 * Also don't keep waiting when called from IRQ context.
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_kiq_write:
	pr_err("failed to write reg:%x\n", reg);
}
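
/*
 * Illustrative sketch only: a caller that needs a read-modify-write of a
 * register while running as an SR-IOV VF could combine the two helpers
 * above (FIELD_MASK and FIELD_VAL are placeholder names), e.g.
 *
 *	uint32_t tmp = amdgpu_virt_kiq_rreg(adev, reg);
 *
 *	tmp &= ~FIELD_MASK;
 *	tmp |= FIELD_VAL;
 *	amdgpu_virt_kiq_wreg(adev, reg, tmp);
 *
 * Note that the two KIQ submissions are separate and therefore not atomic
 * with respect to each other.
 */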
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init or fini, full gpu access must be requested
 * first.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When driver init or fini finishes, full gpu access must be released.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
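
/*
 * Illustrative sketch only: during device init under SR-IOV the two
 * helpers above are expected to bracket the hardware bring-up, roughly
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_virt_request_full_gpu(adev, true);
 *		if (r)
 *			return r;
 *	}
 *	... IP-block init runs with exclusive access ...
 *	if (amdgpu_sriov_vf(adev))
 *		amdgpu_virt_release_full_gpu(adev, true);
 *
 * The real call sites are in the driver's device init/fini paths; this
 * ordering is a simplified assumption of that sequence.
 */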
/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}
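
/*
 * Illustrative sketch only: a VF-side recovery path could combine the two
 * reset helpers above, e.g.
 *
 *	r = amdgpu_virt_reset_gpu(adev);
 *	if (!r)
 *		r = amdgpu_virt_wait_reset(adev);
 *	if (r)
 *		DRM_ERROR("VF reset failed (%d)\n", r);
 *
 * Whether recovery calls reset_gpu directly or is driven by a hypervisor
 * FLR notification depends on the virt backend; this ordering is an
 * assumption for illustration.
 */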
/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on allocation success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
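
/*
 * Illustrative sketch only: the expected users of the MM table helpers
 * above are the UVD/VCE sw_init/sw_fini paths, roughly
 *
 *	In sw_init:
 *		r = amdgpu_virt_alloc_mm_table(adev);
 *		if (r)
 *			return r;
 *		... program the table via mm_table.cpu_addr / gpu_addr ...
 *
 *	In sw_fini:
 *		amdgpu_virt_free_mm_table(adev);
 *
 * Both helpers return early on bare metal, so callers need no extra
 * amdgpu_sriov_vf() guard; the exact call sites are an assumption here.
 */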
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);

	return ret;
}
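
/*
 * Worked note on the helper above: it returns key plus the byte-wise sum
 * of @obj, minus the bytes of @chksum. To verify a PF2VF block, pass the
 * checksum stored in the block as @chksum so that its own bytes are
 * excluded from the sum, then compare the result against that stored
 * value:
 *
 *	checkval = amdgpu_virt_fw_reserve_get_checksum(p_pf2vf, size,
 *						       key, stored_checksum);
 *	valid = (checkval == stored_checksum);
 *
 * This is exactly how amdgpu_virt_init_data_exchange() below validates
 * the message; the variable names are placeholders.
 */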
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* pf2vf message must fit within 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}