/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

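/**
 * amdgpu_virt_mmio_blocked() - check whether MMIO access is blocked
 * @adev:       amdgpu device.
 * Probe a scratch register, bypassing the KIQ, to detect whether the
 * hypervisor currently blocks MMIO access from this VF.
 * Return: True if MMIO access is blocked, false otherwise.
 */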
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /* By now all MMIO pages except mailbox are blocked if blocking is
         * enabled in hypervisor. Choose the SCRATCH_REG0 to test.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

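/**
 * amdgpu_virt_init_setting() - adjust device settings for a virtual function
 * @adev:       amdgpu device.
 * Enable virtual display with a single CRTC and clear the clockgating and
 * powergating flags.
 */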
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}

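/**
 * amdgpu_virt_kiq_rreg() - read a register through the KIQ ring
 * @adev:       amdgpu device.
 * @reg:        offset of the register to read.
 * Emit a register-read packet on the KIQ ring and poll its fence until the
 * value is written back, retrying up to MAX_KIQ_REG_TRY times unless a GPU
 * reset is in progress or we are in interrupt context.
 * Return: The register value, or ~0 on failure.
 */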
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for gpu reset case because this way may
         * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
         * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
         * never return if we keep waiting in virt_kiq_rreg, which causes
         * gpu_recover() to hang there.
         *
         * also don't wait anymore for IRQ context
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_read;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;

        return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
        pr_err("failed to read reg:%x\n", reg);
        return ~0;
}

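/**
 * amdgpu_virt_kiq_wreg() - write a register through the KIQ ring
 * @adev:       amdgpu device.
 * @reg:        offset of the register to write.
 * @v:          value to write.
 * Emit a register-write packet on the KIQ ring and poll its fence for
 * completion, with the same retry and bail-out rules as
 * amdgpu_virt_kiq_rreg().
 */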
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for gpu reset case because this way may
         * block gpu_recover() routine forever, e.g. this virt_kiq_wreg
         * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
         * never return if we keep waiting in virt_kiq_wreg, which causes
         * gpu_recover() to hang there.
         *
         * also don't wait anymore for IRQ context
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_write;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_write;

        return;

failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:       amdgpu device.
 * @init:       whether the driver is being initialized.
 * Before driver init/fini starts, full GPU access must be requested from
 * the hypervisor.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:       amdgpu device.
 * @init:       whether the driver is being initialized.
 * When driver init/fini finishes, full GPU access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:       amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for GPU reset to complete
 * @adev:       amdgpu device.
 * Wait until the hypervisor reports that the GPU reset has completed.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the mm table
 * @adev:       amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:       amdgpu device.
 * Free the MM table memory allocated by amdgpu_virt_alloc_mm_table().
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}

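/**
 * amdgpu_virt_fw_reserve_get_checksum() - compute the data exchange checksum
 * @obj:        buffer to checksum.
 * @obj_size:   size of the buffer in bytes.
 * @key:        key the checksum is seeded with.
 * @chksum:     value of the checksum field inside the buffer, subtracted
 *              back out so the field does not checksum itself.
 * Return: Byte-wise sum of the buffer, seeded with @key.
 */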
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                        unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (unsigned char *)obj;
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the chksum itself */
        pos = (unsigned char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}

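/**
 * amdgpu_virt_init_data_exchange() - set up the pf2vf/vf2pf exchange area
 * @adev:       amdgpu device.
 * Locate the pf2vf message in the firmware-reserved VRAM region, validate
 * its size and checksum, and publish the vf2pf reply (header, driver
 * version and checksum) directly behind it.
 */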
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amdgim_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* the pf2vf message must fit within one 4K page */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}