/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor.  Use SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
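/*
 * Illustrative sketch, not part of the upstream file: one way an SR-IOV
 * init path might use amdgpu_virt_mmio_blocked() to detect that the
 * hypervisor has fenced off register access and that it should wait for a
 * GPU reset before continuing.  The helper name is hypothetical.
 */
static int __maybe_unused amdgpu_virt_example_check_mmio(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* SCRATCH_REG0 reads back as all ones while MMIO is blocked. */
	if (amdgpu_virt_mmio_blocked(adev)) {
		DRM_INFO("MMIO blocked by hypervisor, waiting for reset\n");
		return amdgpu_virt_wait_reset(adev);
	}

	return 0;
}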

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				&adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and every GFX command submission should reference this
 * virtual address in its META_DATA init packet to support SR-IOV gfx
 * preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
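/*
 * Illustrative sketch, not part of the upstream file: how the two CSA
 * helpers above could be paired by a caller.  The backing BO is allocated
 * once per device and then mapped into each VM at AMDGPU_CSA_VADDR; the
 * wrapper below and its csa_va destination are hypothetical.
 */
static int __maybe_unused amdgpu_virt_example_setup_csa(struct amdgpu_device *adev,
							struct amdgpu_vm *vm,
							struct amdgpu_bo_va **csa_va)
{
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* Device-wide backing BO, created only once. */
	if (!adev->virt.csa_obj) {
		r = amdgpu_allocate_static_csa(adev);
		if (r)
			return r;
	}

	/* Per-VM mapping at the reserved CSA virtual address. */
	return amdgpu_map_static_csa(adev, vm, csa_va);
}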

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
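/*
 * Illustrative sketch, not part of the upstream file: how a register
 * accessor could fall back to the KIQ helpers above while the VF is in
 * runtime mode, i.e. when direct MMIO access is not permitted.
 * amdgpu_sriov_runtime() tests the AMDGPU_SRIOV_CAPS_RUNTIME bit that the
 * request/release helpers below toggle; the wrapper name is hypothetical.
 */
static uint32_t __maybe_unused amdgpu_virt_example_rreg(struct amdgpu_device *adev,
							uint32_t reg)
{
	if (amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	return RREG32_NO_KIQ(reg);
}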

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	whether this is called at driver init time.
 * When starting to init/fini the driver, full gpu access must be requested
 * from the hypervisor first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	whether this is called at driver init time.
 * When finishing driver init/fini, full gpu access needs to be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
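/*
 * Illustrative sketch, not part of the upstream file: the exclusive-access
 * bracketing the two helpers above are meant for.  At init (or fini) time
 * the VF first requests full gpu access from the hypervisor, programs the
 * hardware, then releases access again.  The caller name and the middle
 * step are placeholders.
 */
static int __maybe_unused amdgpu_virt_example_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_request_full_gpu(adev, true);
	if (r)
		return r;

	/* ... hardware init that needs direct register access ... */

	return amdgpu_virt_release_full_gpu(adev, true);
}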

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
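/*
 * Illustrative sketch, not part of the upstream file: the usual lifecycle
 * of the MM table.  A multimedia block would allocate it from its sw_init
 * hook and free it from sw_fini; both helpers above are no-ops on bare
 * metal.  The wrapper names below are hypothetical.
 */
static int __maybe_unused amdgpu_virt_example_mm_sw_init(struct amdgpu_device *adev)
{
	/* No-op unless running as an SR-IOV VF without a table yet. */
	return amdgpu_virt_alloc_mm_table(adev);
}

static void __maybe_unused amdgpu_virt_example_mm_sw_fini(struct amdgpu_device *adev)
{
	amdgpu_virt_free_mm_table(adev);
}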

/*
 * Byte-wise checksum, seeded with the shared key.  The bytes of @chksum
 * (the value already stored in the buffer) are subtracted back out so that
 * the stored checksum field does not contribute to the result.
 */
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
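/*
 * Illustrative sketch, not part of the upstream file: how a consumer can
 * validate a received block with the helper above.  The producer computes
 * the checksum while the checksum field is still zero; the consumer
 * recomputes over the block as received and passes the transmitted value as
 * @chksum so its bytes cancel out, and both sides agree when the data is
 * intact.  The wrapper name is hypothetical.
 */
static bool __maybe_unused amdgpu_virt_example_csum_ok(void *blk,
						       unsigned long size,
						       unsigned int key,
						       unsigned int sent)
{
	return amdgpu_virt_fw_reserve_get_checksum(blk, size, key, sent) == sent;
}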

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_ver = 0;
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);

		/* pf2vf message must be in 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}