/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);
/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};
/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
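
/*
 * Example (an illustrative sketch, not a call site in this driver): a
 * request for a 16-bit PASID first tries the range [1 << 15, 1 << 16);
 * only if that bracket is exhausted does the loop fall back to
 * [1 << 14, 1 << 15) and so on, which keeps the small values free for
 * callers that can only address fewer bits.
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;	// -EINVAL, -ENOSPC or -ENOMEM
 *	...
 *	amdgpu_pasid_free(pasid);
 */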
/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}
/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
	unsigned count;
	int r;

	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
	if (r)
		goto fallback;

	if (count == 0) {
		amdgpu_pasid_free(pasid);
		return;
	}

	if (count == 1) {
		fence = fences[0];
		kfree(fences);
	} else {
		uint64_t context = dma_fence_context_alloc(1);
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences, context,
					       1, false);
		if (!array) {
			kfree(fences);
			goto fallback;
		}
		fence = &array->base;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout_rcu(resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
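
/*
 * Illustrative call pattern (a sketch under assumed caller context, not a
 * call site in this file): when a VM is torn down its PASID may still be
 * referenced by in-flight work, so the teardown path would pass the root
 * page directory's reservation object instead of calling
 * amdgpu_pasid_free() directly; root_bo here is a hypothetical name for
 * the VM's root page directory BO.
 *
 *	amdgpu_pasid_free_delayed(root_bo->tbo.base.resv, vm->pasid);
 */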
/*
 * VMID manager
 *
 * VMIDs are a per VMHUB identifier for page table handling.
 */
/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
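
/*
 * Sketch of the intended use (assumed caller, not part of this file): a
 * flush path can use this check to discard cached VMID state that a GPU
 * reset has invalidated.
 *
 *	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
 *		// treat the VMID as brand new: full flush, no state reuse
 *	}
 */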
/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait on to the
 * sync object. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct amdgpu_vmid **idle)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;
	int r;

	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(sync, ring->vmid_wait);

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait until one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		r = amdgpu_sync_fence(sync, &array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return r;
	}
	kfree(fences);

	return 0;
}
/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_sync *sync,
				     struct dma_fence *fence,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	bool needs_flush = vm->use_cpu_for_update;
	int r = 0;

	*id = vm->reserved_vmid[vmhub];
	if (updates && (*id)->flushed_updates &&
	    updates->context == (*id)->flushed_updates->context &&
	    !dma_fence_is_later(updates, (*id)->flushed_updates))
		updates = NULL;

	if ((*id)->owner != vm->immediate.fence_context ||
	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
	    updates || !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			r = amdgpu_sync_fence(sync, tmp);
			return r;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, fence);
	if (r)
		return r;

	if (updates) {
		dma_fence_put((*id)->flushed_updates);
		(*id)->flushed_updates = dma_fence_get(updates);
	}
	job->vm_needs_flush = needs_flush;
	return 0;
}
/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct dma_fence *fence,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;
		struct dma_fence *flushed;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		flushed = (*id)->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active, fence);
		if (r)
			return r;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put((*id)->flushed_updates);
			(*id)->flushed_updates = dma_fence_get(updates);
		}

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}
/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			struct dma_fence *updates = sync->last_vm_update;

			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active, fence);
			if (r)
				goto error;

			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
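
/*
 * Illustrative call pattern (a sketch of the assumed scheduler dependency
 * path, not a call site in this file): job_fence stands in for the fence
 * that signals when the job has finished, which is what protects the VMID
 * from being handed out again too early.
 *
 *	r = amdgpu_vmid_grab(vm, ring, &job->sync, job_fence, job);
 *	if (r)
 *		return r;
 *	// job->vmid, job->pasid and job->vm_needs_flush are now set up
 */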
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limit of reserved VMIDs\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}
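
/*
 * Reserved VMIDs pin one ID per VMHUB to a single VM. A pairing sketch
 * (assumed caller, e.g. an ioctl handler; AMDGPU_GFXHUB_0 names the
 * graphics hub and is assumed from the surrounding driver):
 *
 *	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB_0);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB_0);
 */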
/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}
/**
 * amdgpu_vmid_reset_all - reset VMID to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}
/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}
/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}