/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now, all MMIO pages except the mailbox are blocked if
	 * blocking is enabled in the hypervisor, so probe SCRATCH_REG0
	 * to test for that state.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
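
/*
 * Usage sketch (illustrative, not a specific in-tree call site): callers
 * can probe for hypervisor MMIO blocking before relying on direct
 * register access, e.g.:
 *
 *	if (amdgpu_virt_mmio_blocked(adev))
 *		dev_warn(adev->dev, "MMIO blocked by hypervisor\n");
 */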

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				&adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init();
 * it maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and every GFX command submission should use this
 * virtual address within its META_DATA init package to support SR-IOV
 * GFX preemption.
 */

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
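
/*
 * Usage sketch (illustrative; the actual in-tree call sites may differ):
 * the CSA BO is allocated once at device init and then mapped into each
 * VM, keeping the returned bo_va for later teardown.
 *
 *	struct amdgpu_bo_va *csa_va;
 *
 *	r = amdgpu_allocate_static_csa(adev);
 *	if (!r)
 *		r = amdgpu_map_static_csa(adev, vm, &csa_va);
 */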

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	mutex_init(&adev->virt.lock_reset);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	/* the KIQ wrote the register value into this writeback slot */
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
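
/*
 * Both KIQ helpers above follow the same pattern: emit a register access
 * packet on the KIQ ring under the ring lock, then poll the fence outside
 * the lock. RREG32()/WREG32() already route through these helpers when
 * SR-IOV runtime mode is active, so direct calls are rarely needed. A
 * hedged sketch (mmSOME_REG is a placeholder register):
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		val = amdgpu_virt_kiq_rreg(adev, mmSOME_REG);
 */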

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	true if called during driver init.
 *
 * Before the driver starts init/fini work, it must first request full
 * gpu access from the hypervisor.
 * Return: 0 on success, or a negative error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	true if called during driver init.
 *
 * When driver init/fini work finishes, full gpu access must be released
 * back to the hypervisor.
 * Return: 0 on success, or a negative error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
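
/*
 * Sketch of the request/release bracket around a full-access section
 * (illustrative; the in-tree call sites live in the device init/fini
 * paths):
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... touch the hardware with exclusive access ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */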

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the
 * VM is using.
 * Return: 0 on success, or a negative error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 *
 * Wait until the GPU reset has completed.
 * Return: 0 on success, or a negative error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}
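
/*
 * Sketch of a hypervisor-driven reset sequence (illustrative; the real
 * recovery path interleaves more steps):
 *
 *	r = amdgpu_virt_reset_gpu(adev);
 *	if (!r)
 *		r = amdgpu_virt_wait_reset(adev);
 */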

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: 0 on success, or a negative error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 *
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
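
/*
 * Usage sketch (illustrative): multimedia IP blocks allocate the table
 * in their sw_init path and free it again in sw_fini when running as an
 * SR-IOV VF.
 *
 *	r = amdgpu_virt_alloc_mm_table(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_virt_free_mm_table(adev);
 */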

/*
 * Sum all bytes of @obj seeded with @key, then subtract the bytes of the
 * stored @chksum so the embedded checksum field does not count toward
 * itself.
 */
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
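
/*
 * Verification sketch (illustrative): a PF-written message is intact
 * when recomputing over the same span, with the shared key and the
 * stored checksum, reproduces that stored value (buf, size, key and
 * stored are placeholders):
 *
 *	if (amdgpu_virt_fw_reserve_get_checksum(buf, size, key, stored)
 *	    == stored)
 *		... message is valid ...
 */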

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_ver = 0;
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);

		/* the pf2vf message must fit within 4KB */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}