/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except mailbox are blocked if blocking
	 * is enabled in the hypervisor. Pick SCRATCH_REG0 to test: a
	 * blocked read returns all ones.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
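
/*
 * Usage sketch (illustrative only, not part of this file): a VF probe
 * path could use the check above to fail early once the hypervisor has
 * cut off register access, e.g.:
 *
 *	if (amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev)) {
 *		DRM_ERROR("VF MMIO access is blocked by the hypervisor\n");
 *		return -EACCES;
 *	}
 */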
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	void *ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;
	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}
/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
 * into this VM, and each GFX command submission must use this virtual
 * address in its META_DATA init package to support SR-IOV gfx preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
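
/*
 * Note (illustrative, assuming the usual amdgpu header definitions): the
 * fixed CSA virtual address used above is derived from the reserved VA
 * window, roughly:
 *
 *	#define AMDGPU_CSA_VADDR	(AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
 *
 * so every VM maps the CSA at the same GPU virtual address, which is what
 * allows the META_DATA init package to reference it for gfx preemption.
 */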
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;

	mutex_init(&adev->virt.lock_reset);
}
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	val = adev->wb.wb[adev->virt.reg_val_offs];
	return val;
}
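
/*
 * Usage sketch (illustrative only): in SR-IOV runtime mode the ordinary
 * register accessors are expected to detour through the KIQ helpers
 * instead of touching blocked MMIO pages directly, along the lines of:
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		return amdgpu_virt_kiq_rreg(adev, reg);
 *	return readl(((void __iomem *)adev->rmmio) + (reg * 4));
 */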
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When finishing driver init/fini, full gpu access must be released.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
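
/*
 * Usage sketch (illustrative only): init, fini and reset paths are
 * expected to bracket their exclusive-access phase with the two helpers
 * above, e.g.:
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... full-access hardware programming ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */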
/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}
/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization under SR-IOV.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i;
	unsigned char *pos = (unsigned char *)obj;

	/* sum every byte of the object, including its checksum field */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
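
/*
 * Worked example (illustrative only): if the producer stores
 * chksum = get_checksum(buf, size, key, 0) into buf's checksum field,
 * a consumer recomputing get_checksum(buf, size, key, stored) sums all
 * bytes including the stored field and then subtracts that field's bytes
 * again, so it must get the stored value back. That equality is exactly
 * the validity check performed in amdgpu_virt_init_data_exchange() below.
 */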
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_ver = 0;
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);

		/* the pf2vf message must fit within 4KB */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
				       sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
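
/*
 * Layout note (illustrative, derived from the code above): the exchange
 * area lives in the firmware-reserved VRAM carve-out,
 *
 *	fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET:
 *		[ pf2vf info, written by the host, pf2vf_size bytes ]
 *		[ vf2pf info, written by the guest, immediately after ]
 *
 * and each half carries a version/size header plus a checksum computed
 * with amdgpu_virt_fw_reserve_get_checksum().
 */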