/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};
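
/*
 * Note: the callback structure is allocated in amdgpu_pasid_free_delayed()
 * and freed again in amdgpu_pasid_free_cb() once the fence it is attached
 * to has signaled.
 */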

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
					(1U << bits) - 1, GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
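
/*
 * A worked example of the fallback above: amdgpu_pasid_alloc(16) first
 * tries the range [1 << 15, (1 << 16) - 1]; only when that upper half is
 * exhausted (-ENOSPC) does the loop retry with 15 bits, then 14, and so
 * on. Allocating from the top half of each width keeps the small PASID
 * values available for contexts limited to fewer bits.
 */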

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_free(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
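
/*
 * The function above has three paths: with no fence left in the
 * reservation object the PASID is freed immediately; if the callback
 * structure cannot be allocated (or collecting the fences fails) we block
 * until the fences signal before freeing; otherwise the free is deferred
 * to amdgpu_pasid_free_cb() when the single remaining fence signals.
 */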

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}
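
/*
 * Put differently, an ID is compatible when it can be taken over without
 * reprogramming: it already points at the job's page directory and no
 * GDS/GWS/OA switch is required.
 */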

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is idle, return a fence in @fence to
 * wait for before retrying. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}
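
/*
 * Note how the wait is constructed above: the first active fence of every
 * busy VMID is collected into a single dma_fence_array that is cached in
 * ring->vmid_wait, so repeated grab attempts on this ring wait on the same
 * array instead of rebuilding it every time.
 */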

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Wait for the gang to be assembled before using a
		 * reserved VMID or otherwise the gang could deadlock.
		 */
		tmp = amdgpu_device_get_gang(adev);
		if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
			*id = NULL;
			*fence = tmp;
			return 0;
		}
		dma_fence_put(tmp);

		/* Make sure the id is owned by the gang before proceeding */
		if (!job->gang_submit ||
		    (*id)->owner != vm->immediate.fence_context) {

			/* Don't use per engine and per process VMID at the
			 * same time
			 */
			if (adev->vm_manager.concurrent_flush)
				ring = NULL;

			/* to prevent one context starved by another context */
			(*id)->pd_gpu_addr = 0;
			tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
			if (tmp) {
				*id = NULL;
				*fence = dma_fence_get(tmp);
				return 0;
			}
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}
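
/*
 * Unlike amdgpu_vmid_grab_used() there is only one reserved slot to hand
 * out, so when the ID fails the reuse checks the code above either returns
 * a fence to wait on through @fence or falls through with needs_flush set,
 * rather than trying another ID.
 */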

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}
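
/*
 * The LRU list is walked in reverse here so that the most recently used
 * VMIDs, which amdgpu_vmid_grab() moves to the tail, are tried first.
 */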

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
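
/*
 * Summary of the grab order: a reserved VMID when isolation requires one,
 * otherwise an ID already assigned to this VM, and as a last resort the
 * idle ID taken from the head of the LRU list.
 */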

/**
 * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
 * @adev: amdgpu_device pointer
 * @vm: the VM to check
 * @vmhub: the VMHUB which will be used
 *
 * Returns: True if the VM will use a reserved VMID.
 */
bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, unsigned int vmhub)
{
	return vm->reserved_vmid[vmhub] ||
		(adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
					 vm->root.bo->xcp_id : 0] &&
		 AMDGPU_IS_GFXHUB(vmhub));
}
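
/*
 * That is, a reserved VMID is used either because one was explicitly
 * reserved for this VM on the hub, or because isolation is enforced for
 * the VM's partition and the submission targets a GFX hub.
 */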

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}
	mutex_unlock(&id_mgr->lock);
}
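
/*
 * Reservations are refcounted via reserved_use_count, so the ID is only
 * returned to round robin handling once the last user has called
 * amdgpu_vmid_free_reserved().
 */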

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	/* alloc a default reserved vmid to enforce isolation */
	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
		if (adev->enforce_isolation[i])
			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}