2 * Copyright 2009 Jerome Glisse.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched/task.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/dma-buf.h>
42 #include <linux/sizes.h>
43 #include <linux/module.h>
45 #include <drm/drm_drv.h>
46 #include <drm/ttm/ttm_bo.h>
47 #include <drm/ttm/ttm_placement.h>
48 #include <drm/ttm/ttm_range_manager.h>
49 #include <drm/ttm/ttm_tt.h>
51 #include <drm/amdgpu_drm.h>
54 #include "amdgpu_object.h"
55 #include "amdgpu_trace.h"
56 #include "amdgpu_amdkfd.h"
57 #include "amdgpu_sdma.h"
58 #include "amdgpu_ras.h"
59 #include "amdgpu_hmm.h"
60 #include "amdgpu_atomfirmware.h"
61 #include "amdgpu_res_cursor.h"
62 #include "bif/bif_4_1_d.h"
64 MODULE_IMPORT_NS("DMA_BUF");
66 #define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128)
68 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
70 struct ttm_resource *bo_mem);
71 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
74 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
76 uint64_t size_in_page)
78 return ttm_range_man_init(&adev->mman.bdev, type,
83 * amdgpu_evict_flags - Compute placement flags
85 * @bo: The buffer object to evict
86 * @placement: Possible destination(s) for evicted BO
88 * Fill in placement data when ttm_bo_evict() is called
90 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
91 struct ttm_placement *placement)
93 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
94 struct amdgpu_bo *abo;
95 static const struct ttm_place placements = {
98 .mem_type = TTM_PL_SYSTEM,
102 /* Don't handle scatter gather BOs */
103 if (bo->type == ttm_bo_type_sg) {
104 placement->num_placement = 0;
108 /* Object isn't an AMDGPU object so ignore */
109 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
110 placement->placement = &placements;
111 placement->num_placement = 1;
115 abo = ttm_to_amdgpu_bo(bo);
116 if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
117 placement->num_placement = 0;
121 switch (bo->resource->mem_type) {
125 case AMDGPU_PL_DOORBELL:
126 placement->num_placement = 0;
130 if (!adev->mman.buffer_funcs_enabled) {
131 /* Move to system memory */
132 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
134 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
135 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
136 amdgpu_res_cpu_visible(adev, bo->resource)) {
138 /* Try evicting to the CPU inaccessible part of VRAM
139 * first, but only set GTT as busy placement, so this
140 * BO will be evicted to GTT rather than causing other
141 * BOs to be evicted from VRAM
143 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
144 AMDGPU_GEM_DOMAIN_GTT |
145 AMDGPU_GEM_DOMAIN_CPU);
146 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
147 abo->placements[0].lpfn = 0;
148 abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
150 /* Move to GTT memory */
151 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
152 AMDGPU_GEM_DOMAIN_CPU);
156 case AMDGPU_PL_PREEMPT:
158 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
161 *placement = abo->placement;
165 * amdgpu_ttm_map_buffer - Map memory into the GART windows
166 * @bo: buffer object to map
167 * @mem: memory object to map
168 * @mm_cur: range to map
169 * @window: which GART window to use
170 * @ring: DMA ring to use for the copy
171 * @tmz: if we should set up a TMZ-enabled mapping
172 * @size: in number of bytes to map, out number of bytes mapped
173 * @addr: resulting address inside the MC address space
175 * Set up one of the GART windows to access a specific piece of memory or return
176 * the physical address for local memory.
178 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
179 struct ttm_resource *mem,
180 struct amdgpu_res_cursor *mm_cur,
181 unsigned int window, struct amdgpu_ring *ring,
182 bool tmz, uint64_t *size, uint64_t *addr)
184 struct amdgpu_device *adev = ring->adev;
185 unsigned int offset, num_pages, num_dw, num_bytes;
186 uint64_t src_addr, dst_addr;
187 struct amdgpu_job *job;
193 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
194 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
196 if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
199 /* Map only what can't be accessed directly */
200 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
201 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
208 * If start begins at an offset inside the page, then adjust the size
209 * and addr accordingly
211 offset = mm_cur->start & ~PAGE_MASK;
213 num_pages = PFN_UP(*size + offset);
214 num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
216 *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
218 *addr = adev->gmc.gart_start;
219 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
220 AMDGPU_GPU_PAGE_SIZE;
223 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
224 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
226 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
227 AMDGPU_FENCE_OWNER_UNDEFINED,
228 num_dw * 4 + num_bytes,
229 AMDGPU_IB_POOL_DELAYED, &job);
233 src_addr = num_dw * 4;
234 src_addr += job->ibs[0].gpu_addr;
236 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
237 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
238 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
239 dst_addr, num_bytes, 0);
241 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
242 WARN_ON(job->ibs[0].length_dw > num_dw);
244 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
246 flags |= AMDGPU_PTE_TMZ;
248 cpu_addr = &job->ibs[0].ptr[num_dw];
250 if (mem->mem_type == TTM_PL_TT) {
251 dma_addr_t *dma_addr;
253 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
254 amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
256 dma_addr_t dma_address;
258 dma_address = mm_cur->start;
259 dma_address += adev->vm_manager.vram_base_offset;
261 for (i = 0; i < num_pages; ++i) {
262 amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
264 dma_address += PAGE_SIZE;
268 dma_fence_put(amdgpu_job_submit(job));
273 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
274 * @adev: amdgpu device
275 * @src: buffer/address where to read from
276 * @dst: buffer/address where to write to
277 * @size: number of bytes to copy
278 * @tmz: if a secure copy should be used
279 * @resv: resv object to sync to
280 * @f: Returns the last fence if multiple jobs are submitted.
282 * The function copies @size bytes from {src->mem + src->offset} to
283 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
284 * move and different for a BO to BO copy.
287 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
288 const struct amdgpu_copy_mem *src,
289 const struct amdgpu_copy_mem *dst,
290 uint64_t size, bool tmz,
291 struct dma_resv *resv,
292 struct dma_fence **f)
294 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
295 struct amdgpu_res_cursor src_mm, dst_mm;
296 struct dma_fence *fence = NULL;
298 uint32_t copy_flags = 0;
299 struct amdgpu_bo *abo_src, *abo_dst;
301 if (!adev->mman.buffer_funcs_enabled) {
302 DRM_ERROR("Trying to move memory with ring turned off.\n");
306 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
307 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
309 mutex_lock(&adev->mman.gtt_window_lock);
310 while (src_mm.remaining) {
311 uint64_t from, to, cur_size, tiling_flags;
312 uint32_t num_type, data_format, max_com;
313 struct dma_fence *next;
315 /* Never copy more than 256MiB at once to avoid a timeout */
316 cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
318 /* Map src to window 0 and dst to window 1. */
319 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
320 0, ring, tmz, &cur_size, &from);
324 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
325 1, ring, tmz, &cur_size, &to);
329 abo_src = ttm_to_amdgpu_bo(src->bo);
330 abo_dst = ttm_to_amdgpu_bo(dst->bo);
332 copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
333 if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
334 (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
335 copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
336 if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
337 (dst->mem->mem_type == TTM_PL_VRAM)) {
338 copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
339 amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
340 max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
341 num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
342 data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
343 copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
344 AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
345 AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
348 r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
349 &next, false, true, copy_flags);
353 dma_fence_put(fence);
356 amdgpu_res_next(&src_mm, cur_size);
357 amdgpu_res_next(&dst_mm, cur_size);
360 mutex_unlock(&adev->mman.gtt_window_lock);
362 *f = dma_fence_get(fence);
363 dma_fence_put(fence);
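/*
 * Illustrative sketch (not part of the driver): how a caller such as
 * amdgpu_move_blit() below typically drives amdgpu_ttm_copy_mem_to_mem().
 * example_copy_whole_bo() is a hypothetical helper; the struct amdgpu_copy_mem
 * fields (bo, mem, offset) follow the kernel-doc above and error handling is
 * reduced to the bare minimum.
 */
#if 0
static int example_copy_whole_bo(struct amdgpu_device *adev,
				 struct ttm_buffer_object *bo,
				 struct ttm_resource *new_mem)
{
	struct amdgpu_copy_mem src = { .bo = bo, .mem = bo->resource, .offset = 0 };
	struct amdgpu_copy_mem dst = { .bo = bo, .mem = new_mem, .offset = 0 };
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, bo->base.size,
				       amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)),
				       bo->base.resv, &fence);
	if (r)
		return r;

	/* Wait for the last copy job before the new location is used. */
	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}
	return 0;
}
#endif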
368 * amdgpu_move_blit - Copy an entire buffer to another buffer
370 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
371 * help move buffers to and from VRAM.
373 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
375 struct ttm_resource *new_mem,
376 struct ttm_resource *old_mem)
378 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
379 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
380 struct amdgpu_copy_mem src, dst;
381 struct dma_fence *fence = NULL;
391 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
393 amdgpu_bo_encrypted(abo),
394 bo->base.resv, &fence);
398 /* clear the space being freed */
399 if (old_mem->mem_type == TTM_PL_VRAM &&
400 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
401 struct dma_fence *wipe_fence = NULL;
403 r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
407 } else if (wipe_fence) {
408 amdgpu_vram_mgr_set_cleared(bo->resource);
409 dma_fence_put(fence);
414 /* Always block for VM page tables before committing the new location */
415 if (bo->type == ttm_bo_type_kernel)
416 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
418 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
419 dma_fence_put(fence);
424 dma_fence_wait(fence, false);
425 dma_fence_put(fence);
430 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
431 * @adev: amdgpu device
432 * @res: the resource to check
434 * Returns: true if the full resource is CPU visible, false otherwise.
436 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
437 struct ttm_resource *res)
439 struct amdgpu_res_cursor cursor;
444 if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
445 res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
448 if (res->mem_type != TTM_PL_VRAM)
451 amdgpu_res_first(res, 0, res->size, &cursor);
452 while (cursor.remaining) {
453 if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
455 amdgpu_res_next(&cursor, cursor.size);
462 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
464 * Called by amdgpu_bo_move()
466 static bool amdgpu_res_copyable(struct amdgpu_device *adev,
467 struct ttm_resource *mem)
469 if (!amdgpu_res_cpu_visible(adev, mem))
472 /* ttm_resource_ioremap only supports contiguous memory */
473 if (mem->mem_type == TTM_PL_VRAM &&
474 !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
481 * amdgpu_bo_move - Move a buffer object to a new memory location
483 * Called by ttm_bo_handle_move_mem()
485 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
486 struct ttm_operation_ctx *ctx,
487 struct ttm_resource *new_mem,
488 struct ttm_place *hop)
490 struct amdgpu_device *adev;
491 struct amdgpu_bo *abo;
492 struct ttm_resource *old_mem = bo->resource;
495 if (new_mem->mem_type == TTM_PL_TT ||
496 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
497 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
502 abo = ttm_to_amdgpu_bo(bo);
503 adev = amdgpu_ttm_adev(bo->bdev);
505 if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
507 amdgpu_bo_move_notify(bo, evict, new_mem);
508 ttm_bo_move_null(bo, new_mem);
511 if (old_mem->mem_type == TTM_PL_SYSTEM &&
512 (new_mem->mem_type == TTM_PL_TT ||
513 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
514 amdgpu_bo_move_notify(bo, evict, new_mem);
515 ttm_bo_move_null(bo, new_mem);
518 if ((old_mem->mem_type == TTM_PL_TT ||
519 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
520 new_mem->mem_type == TTM_PL_SYSTEM) {
521 r = ttm_bo_wait_ctx(bo, ctx);
525 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
526 amdgpu_bo_move_notify(bo, evict, new_mem);
527 ttm_resource_free(bo, &bo->resource);
528 ttm_bo_assign_mem(bo, new_mem);
532 if (old_mem->mem_type == AMDGPU_PL_GDS ||
533 old_mem->mem_type == AMDGPU_PL_GWS ||
534 old_mem->mem_type == AMDGPU_PL_OA ||
535 old_mem->mem_type == AMDGPU_PL_DOORBELL ||
536 new_mem->mem_type == AMDGPU_PL_GDS ||
537 new_mem->mem_type == AMDGPU_PL_GWS ||
538 new_mem->mem_type == AMDGPU_PL_OA ||
539 new_mem->mem_type == AMDGPU_PL_DOORBELL) {
540 /* Nothing to save here */
541 amdgpu_bo_move_notify(bo, evict, new_mem);
542 ttm_bo_move_null(bo, new_mem);
546 if (bo->type == ttm_bo_type_device &&
547 new_mem->mem_type == TTM_PL_VRAM &&
548 old_mem->mem_type != TTM_PL_VRAM) {
549 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
550 * accesses the BO after it's moved.
552 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
555 if (adev->mman.buffer_funcs_enabled &&
556 ((old_mem->mem_type == TTM_PL_SYSTEM &&
557 new_mem->mem_type == TTM_PL_VRAM) ||
558 (old_mem->mem_type == TTM_PL_VRAM &&
559 new_mem->mem_type == TTM_PL_SYSTEM))) {
562 hop->mem_type = TTM_PL_TT;
563 hop->flags = TTM_PL_FLAG_TEMPORARY;
567 amdgpu_bo_move_notify(bo, evict, new_mem);
568 if (adev->mman.buffer_funcs_enabled)
569 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
574 /* Check that all memory is CPU accessible */
575 if (!amdgpu_res_copyable(adev, old_mem) ||
576 !amdgpu_res_copyable(adev, new_mem)) {
577 pr_err("Move buffer fallback to memcpy unavailable\n");
581 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
586 /* update statistics after the move */
588 atomic64_inc(&adev->num_evictions);
589 atomic64_add(bo->base.size, &adev->num_bytes_moved);
594 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
596 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
598 static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
599 struct ttm_resource *mem)
601 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
603 switch (mem->mem_type) {
608 case AMDGPU_PL_PREEMPT:
611 mem->bus.offset = mem->start << PAGE_SHIFT;
613 if (adev->mman.aper_base_kaddr &&
614 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
615 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
618 mem->bus.offset += adev->gmc.aper_base;
619 mem->bus.is_iomem = true;
621 case AMDGPU_PL_DOORBELL:
622 mem->bus.offset = mem->start << PAGE_SHIFT;
623 mem->bus.offset += adev->doorbell.base;
624 mem->bus.is_iomem = true;
625 mem->bus.caching = ttm_uncached;
633 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
634 unsigned long page_offset)
636 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
637 struct amdgpu_res_cursor cursor;
639 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
642 if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
643 return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
645 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
649 * amdgpu_ttm_domain_start - Returns GPU start address
650 * @adev: amdgpu device object
651 * @type: type of the memory
654 * GPU start address of a memory domain
657 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
661 return adev->gmc.gart_start;
663 return adev->gmc.vram_start;
670 * TTM backend functions.
672 struct amdgpu_ttm_tt {
674 struct drm_gem_object *gobj;
677 struct task_struct *usertask;
683 #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
685 #ifdef CONFIG_DRM_AMDGPU_USERPTR
687 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
688 * memory and start HMM tracking CPU page table update
690 * Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and only
691 * once afterwards to stop HMM tracking
693 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
694 struct hmm_range **range)
696 struct ttm_tt *ttm = bo->tbo.ttm;
697 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
698 unsigned long start = gtt->userptr;
699 struct vm_area_struct *vma;
700 struct mm_struct *mm;
704 /* Make sure get_user_pages_done() can clean up gracefully */
707 mm = bo->notifier.mm;
709 DRM_DEBUG_DRIVER("BO is not registered?\n");
713 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
717 vma = vma_lookup(mm, start);
718 if (unlikely(!vma)) {
722 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
728 readonly = amdgpu_ttm_tt_is_readonly(ttm);
729 r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
730 readonly, NULL, pages, range);
732 mmap_read_unlock(mm);
734 pr_debug("failed %d to get user pages 0x%lx\n", r, start);
741 /* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
743 void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
744 struct hmm_range *range)
746 struct amdgpu_ttm_tt *gtt = (void *)ttm;
748 if (gtt && gtt->userptr && range)
749 amdgpu_hmm_range_get_pages_done(range);
753 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
754 * Check if the pages backing this ttm range have been invalidated
756 * Returns: true if pages are still valid
758 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
759 struct hmm_range *range)
761 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
763 if (!gtt || !gtt->userptr || !range)
766 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
767 gtt->userptr, ttm->num_pages);
769 WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
771 return !amdgpu_hmm_range_get_pages_done(range);
776 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
778 * Called by amdgpu_cs_list_validate(). This creates the page list
779 * that backs user memory and will ultimately be mapped into the device
782 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
786 for (i = 0; i < ttm->num_pages; ++i)
787 ttm->pages[i] = pages ? pages[i] : NULL;
791 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
793 * Called by amdgpu_ttm_backend_bind()
795 static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
798 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
799 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
800 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
801 enum dma_data_direction direction = write ?
802 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
805 /* Allocate an SG array and squash pages into it */
806 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
807 (u64)ttm->num_pages << PAGE_SHIFT,
812 /* Map SG to device */
813 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
815 goto release_sg_table;
817 /* convert SG to linear array of pages and dma addresses */
818 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
824 sg_free_table(ttm->sg);
832 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
834 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
837 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
838 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
839 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
840 enum dma_data_direction direction = write ?
841 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
843 /* double check that we don't free the table twice */
844 if (!ttm->sg || !ttm->sg->sgl)
847 /* unmap the pages mapped to the device */
848 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
849 sg_free_table(ttm->sg);
853 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
854 * MQDn+CtrlStackn where n is the number of XCCs per partition.
855 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is the MQD
856 * and uses the default memory type, UC. The remaining pages are the
857 * control stack and have their memory type changed to NC.
859 static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
860 struct ttm_tt *ttm, uint64_t flags)
862 struct amdgpu_ttm_tt *gtt = (void *)ttm;
863 uint64_t total_pages = ttm->num_pages;
864 int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
865 uint64_t page_idx, pages_per_xcc;
867 uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
869 pages_per_xcc = total_pages;
870 do_div(pages_per_xcc, num_xcc);
872 for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
873 /* MQD page: use default flags */
874 amdgpu_gart_bind(adev,
875 gtt->offset + (page_idx << PAGE_SHIFT),
876 1, &gtt->ttm.dma_address[page_idx], flags);
878 * Ctrl pages - modify the memory type to NC (ctrl_flags) from
879 * the second page of the BO onward.
881 amdgpu_gart_bind(adev,
882 gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
884 &gtt->ttm.dma_address[page_idx + 1],
889 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
890 struct ttm_buffer_object *tbo,
893 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
894 struct ttm_tt *ttm = tbo->ttm;
895 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
897 if (amdgpu_bo_encrypted(abo))
898 flags |= AMDGPU_PTE_TMZ;
900 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
901 amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
903 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
904 gtt->ttm.dma_address, flags);
910 * amdgpu_ttm_backend_bind - Bind GTT memory
912 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
913 * This handles binding GTT memory to the device address space.
915 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
917 struct ttm_resource *bo_mem)
919 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
920 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
931 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
933 DRM_ERROR("failed to pin userptr\n");
936 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
938 struct dma_buf_attachment *attach;
939 struct sg_table *sgt;
941 attach = gtt->gobj->import_attach;
942 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
949 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
953 if (!ttm->num_pages) {
954 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
955 ttm->num_pages, bo_mem, ttm);
958 if (bo_mem->mem_type != TTM_PL_TT ||
959 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
960 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
964 /* compute PTE flags relevant to this BO memory */
965 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
967 /* bind pages into GART page tables */
968 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
969 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
970 gtt->ttm.dma_address, flags);
976 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
977 * through AGP or GART aperture.
979 * If bo is accessible through AGP aperture, then use AGP aperture
980 * to access bo; otherwise allocate logical space in GART aperture
981 * and map bo to GART aperture.
983 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
985 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
986 struct ttm_operation_ctx ctx = { false, false };
987 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
988 struct ttm_placement placement;
989 struct ttm_place placements;
990 struct ttm_resource *tmp;
991 uint64_t addr, flags;
994 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
997 addr = amdgpu_gmc_agp_addr(bo);
998 if (addr != AMDGPU_BO_INVALID_OFFSET)
1001 /* allocate GART space */
1002 placement.num_placement = 1;
1003 placement.placement = &placements;
1004 placements.fpfn = 0;
1005 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1006 placements.mem_type = TTM_PL_TT;
1007 placements.flags = bo->resource->placement;
1009 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1013 /* compute PTE flags for this buffer object */
1014 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
1017 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
1018 amdgpu_ttm_gart_bind(adev, bo, flags);
1019 amdgpu_gart_invalidate_tlb(adev);
1020 ttm_resource_free(bo, &bo->resource);
1021 ttm_bo_assign_mem(bo, tmp);
1027 * amdgpu_ttm_recover_gart - Rebind GTT pages
1029 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1030 * rebind GTT pages during a GPU reset.
1032 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1034 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1040 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1041 amdgpu_ttm_gart_bind(adev, tbo, flags);
1045 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1047 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1050 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1053 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1054 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1056 /* if the pages have userptr pinning then clear that first */
1058 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1059 } else if (ttm->sg && gtt->gobj->import_attach) {
1060 struct dma_buf_attachment *attach;
1062 attach = gtt->gobj->import_attach;
1063 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1070 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1073 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1074 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1078 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1081 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1084 put_task_struct(gtt->usertask);
1086 ttm_tt_fini(&gtt->ttm);
1091 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1093 * @bo: The buffer object to create a GTT ttm_tt object around
1094 * @page_flags: Page flags to be added to the ttm_tt object
1096 * Called by ttm_tt_create().
1098 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1099 uint32_t page_flags)
1101 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1102 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1103 struct amdgpu_ttm_tt *gtt;
1104 enum ttm_caching caching;
1106 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1110 gtt->gobj = &bo->base;
1111 if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
1112 gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
1114 gtt->pool_id = abo->xcp_id;
1116 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1117 caching = ttm_write_combined;
1119 caching = ttm_cached;
1121 /* allocate space for the uninitialized page entries */
1122 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1130 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1132 * Map the pages of a ttm_tt object to an address space visible
1133 * to the underlying device.
1135 static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1137 struct ttm_operation_ctx *ctx)
1139 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1140 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1141 struct ttm_pool *pool;
1145 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1147 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1153 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1156 if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1157 pool = &adev->mman.ttm_pools[gtt->pool_id];
1159 pool = &adev->mman.bdev.pool;
1160 ret = ttm_pool_alloc(pool, ttm, ctx);
1164 for (i = 0; i < ttm->num_pages; ++i)
1165 ttm->pages[i]->mapping = bdev->dev_mapping;
1171 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1173 * Unmaps pages of a ttm_tt object from the device address space and
1174 * unpopulates the page array backing it.
1176 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1179 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1180 struct amdgpu_device *adev;
1181 struct ttm_pool *pool;
1184 amdgpu_ttm_backend_unbind(bdev, ttm);
1187 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1193 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1196 for (i = 0; i < ttm->num_pages; ++i)
1197 ttm->pages[i]->mapping = NULL;
1199 adev = amdgpu_ttm_adev(bdev);
1201 if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1202 pool = &adev->mman.ttm_pools[gtt->pool_id];
1204 pool = &adev->mman.bdev.pool;
1206 return ttm_pool_free(pool, ttm);
1210 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1213 * @tbo: The ttm_buffer_object that contains the userptr
1214 * @user_addr: The returned value
1216 int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1217 uint64_t *user_addr)
1219 struct amdgpu_ttm_tt *gtt;
1224 gtt = (void *)tbo->ttm;
1225 *user_addr = gtt->userptr;
1230 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1233 * @bo: The ttm_buffer_object to bind this userptr to
1234 * @addr: The address in the current task's VM space to use
1235 * @flags: Requirements of userptr object.
1237 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
1238 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
1239 * initialize GPU VM for a KFD process.
1241 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1242 uint64_t addr, uint32_t flags)
1244 struct amdgpu_ttm_tt *gtt;
1247 /* TODO: We want a separate TTM object type for userptrs */
1248 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1249 if (bo->ttm == NULL)
1253 /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1254 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1256 gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1257 gtt->userptr = addr;
1258 gtt->userflags = flags;
1261 put_task_struct(gtt->usertask);
1262 gtt->usertask = current->group_leader;
1263 get_task_struct(gtt->usertask);
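/*
 * Illustrative sketch (not part of the driver): the userptr life cycle as it
 * is driven by the CS and KFD paths. example_userptr_flow() is a hypothetical
 * helper; MMU notifier registration, locking and allocation of the pages
 * array are omitted.
 */
#if 0
static int example_userptr_flow(struct amdgpu_bo *abo, uint64_t addr,
				struct page **pages)
{
	struct hmm_range *range;
	int r;

	/* Turn the BO's ttm_tt into a userptr object. */
	r = amdgpu_ttm_tt_set_userptr(&abo->tbo, addr,
				      AMDGPU_GEM_USERPTR_ANONONLY);
	if (r)
		return r;

	/* Grab the current CPU pages and start HMM tracking. */
	r = amdgpu_ttm_tt_get_user_pages(abo, pages, &range);
	if (r)
		return r;

	/* Publish the pages into the ttm_tt page array before validation. */
	amdgpu_ttm_tt_set_user_pages(abo->tbo.ttm, pages);

	/* ... validate and bind the BO here ... */

	/* Stop HMM tracking; restart if the range was invalidated meanwhile. */
	if (!amdgpu_ttm_tt_get_user_pages_done(abo->tbo.ttm, range))
		return -EAGAIN;

	return 0;
}
#endif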
1269 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1271 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1273 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1278 if (gtt->usertask == NULL)
1281 return gtt->usertask->mm;
1285 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1286 * address range for the current task.
1289 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1290 unsigned long end, unsigned long *userptr)
1292 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1295 if (gtt == NULL || !gtt->userptr)
1298 /* Return false if no part of the ttm_tt object lies within
1301 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1302 if (gtt->userptr > end || gtt->userptr + size <= start)
1306 *userptr = gtt->userptr;
1311 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1313 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1315 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1317 if (gtt == NULL || !gtt->userptr)
1324 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1326 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1328 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1333 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1337 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1339 * @ttm: The ttm_tt object to compute the flags for
1340 * @mem: The memory resource backing this ttm_tt object
1342 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1344 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1348 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1349 flags |= AMDGPU_PTE_VALID;
1351 if (mem && (mem->mem_type == TTM_PL_TT ||
1352 mem->mem_type == AMDGPU_PL_DOORBELL ||
1353 mem->mem_type == AMDGPU_PL_PREEMPT)) {
1354 flags |= AMDGPU_PTE_SYSTEM;
1356 if (ttm->caching == ttm_cached)
1357 flags |= AMDGPU_PTE_SNOOPED;
1360 if (mem && mem->mem_type == TTM_PL_VRAM &&
1361 mem->bus.caching == ttm_cached)
1362 flags |= AMDGPU_PTE_SNOOPED;
1368 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1370 * @adev: amdgpu_device pointer
1371 * @ttm: The ttm_tt object to compute the flags for
1372 * @mem: The memory resource backing this ttm_tt object
1374 * Figure out the flags to use for a VM PTE (Page Table Entry).
1376 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1377 struct ttm_resource *mem)
1379 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1381 flags |= adev->gart.gart_pte_flags;
1382 flags |= AMDGPU_PTE_READABLE;
1384 if (!amdgpu_ttm_tt_is_readonly(ttm))
1385 flags |= AMDGPU_PTE_WRITEABLE;
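/*
 * Example (informational): for a cache-coherent GTT binding, i.e. a ttm_cached
 * ttm_tt backed by a TTM_PL_TT resource, amdgpu_ttm_tt_pde_flags() returns
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED, and
 * amdgpu_ttm_tt_pte_flags() then adds the per-ASIC gart_pte_flags plus
 * AMDGPU_PTE_READABLE and, unless the BO is a read-only userptr,
 * AMDGPU_PTE_WRITEABLE.
 */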
1391 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1394 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1395 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1396 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1397 * used to clean out a memory space.
1399 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1400 const struct ttm_place *place)
1402 struct dma_resv_iter resv_cursor;
1403 struct dma_fence *f;
1405 if (!amdgpu_bo_is_amdgpu_bo(bo))
1406 return ttm_bo_eviction_valuable(bo, place);
1409 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1412 if (bo->type == ttm_bo_type_kernel &&
1413 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1416 /* If bo is a KFD BO, check if the bo belongs to the current process.
1417 * If true, then return false as any KFD process needs all its BOs to
1418 * be resident to run successfully
1420 dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1421 DMA_RESV_USAGE_BOOKKEEP, f) {
1422 if (amdkfd_fence_check_mm(f, current->mm) &&
1423 !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
1427 /* Preemptible BOs don't own system resources managed by the
1428 * driver (pages, VRAM, GART space). They point to resources
1429 * owned by someone else (e.g. pageable memory in user mode
1430 * or a DMABuf). They are used in a preemptible context so we
1431 * can guarantee no deadlocks and good QoS in case of MMU
1432 * notifiers or DMABuf move notifiers from the resource owner.
1434 if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
1437 if (bo->resource->mem_type == TTM_PL_TT &&
1438 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1441 return ttm_bo_eviction_valuable(bo, place);
1444 static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1445 void *buf, size_t size, bool write)
1448 uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1449 uint64_t bytes = 4 - (pos & 0x3);
1450 uint32_t shift = (pos & 0x3) * 8;
1451 uint32_t mask = 0xffffffff << shift;
1455 mask &= 0xffffffff >> (bytes - size) * 8;
1459 if (mask != 0xffffffff) {
1460 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1463 value |= (*(uint32_t *)buf << shift) & mask;
1464 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1466 value = (value & mask) >> shift;
1467 memcpy(buf, &value, bytes);
1470 amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
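/*
 * Worked example (informational): a 1-byte write to a pos ending in ...6 gives
 * aligned_pos = pos - 2, shift = 16, bytes = 2 and an initial mask of
 * 0xffff0000, which the size clamp narrows to 0x00ff0000. Only byte 2 of the
 * aligned dword is therefore read-modify-written; the neighbouring bytes are
 * preserved.
 */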
1479 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1480 unsigned long offset, void *buf,
1483 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1484 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1485 struct amdgpu_res_cursor src_mm;
1486 struct amdgpu_job *job;
1487 struct dma_fence *fence;
1488 uint64_t src_addr, dst_addr;
1489 unsigned int num_dw;
1492 if (len != PAGE_SIZE)
1495 if (!adev->mman.sdma_access_ptr)
1498 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1502 memcpy(adev->mman.sdma_access_ptr, buf, len);
1504 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1505 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
1506 AMDGPU_FENCE_OWNER_UNDEFINED,
1507 num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1512 amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1513 src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
1515 dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1517 swap(src_addr, dst_addr);
1519 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
1522 amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
1523 WARN_ON(job->ibs[0].length_dw > num_dw);
1525 fence = amdgpu_job_submit(job);
1527 if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1529 dma_fence_put(fence);
1532 memcpy(buf, adev->mman.sdma_access_ptr, len);
1539 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1541 * @bo: The buffer object to read/write
1542 * @offset: Offset into buffer object
1543 * @buf: Secondary buffer to write/read from
1544 * @len: Length in bytes of access
1545 * @write: true if writing
1547 * This is used to access VRAM that backs a buffer object via MMIO
1548 * access for debugging purposes.
1550 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1551 unsigned long offset, void *buf, int len,
1554 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1555 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1556 struct amdgpu_res_cursor cursor;
1559 if (bo->resource->mem_type != TTM_PL_VRAM)
1562 if (amdgpu_device_has_timeouts_enabled(adev) &&
1563 !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1566 amdgpu_res_first(bo->resource, offset, len, &cursor);
1567 while (cursor.remaining) {
1568 size_t count, size = cursor.size;
1569 loff_t pos = cursor.start;
1571 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1574 /* use MM access for the rest of VRAM and for unaligned addresses */
1577 amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1582 amdgpu_res_next(&cursor, cursor.size);
1589 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1591 amdgpu_bo_move_notify(bo, false, NULL);
1594 static struct ttm_device_funcs amdgpu_bo_driver = {
1595 .ttm_tt_create = &amdgpu_ttm_tt_create,
1596 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1597 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1598 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1599 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1600 .evict_flags = &amdgpu_evict_flags,
1601 .move = &amdgpu_bo_move,
1602 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1603 .release_notify = &amdgpu_bo_release_notify,
1604 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1605 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1606 .access_memory = &amdgpu_ttm_access_memory,
1610 * Firmware Reservation functions
1613 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1615 * @adev: amdgpu_device pointer
1617 * free fw reserved vram if it has been reserved.
1619 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1621 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1622 NULL, &adev->mman.fw_vram_usage_va);
1626 * Driver Reservation functions
1629 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
1631 * @adev: amdgpu_device pointer
1633 * free drv reserved vram if it has been reserved.
1635 static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
1637 amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
1639 &adev->mman.drv_vram_usage_va);
1643 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1645 * @adev: amdgpu_device pointer
1647 * create bo vram reservation from fw.
1649 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1651 uint64_t vram_size = adev->gmc.visible_vram_size;
1653 adev->mman.fw_vram_usage_va = NULL;
1654 adev->mman.fw_vram_usage_reserved_bo = NULL;
1656 if (adev->mman.fw_vram_usage_size == 0 ||
1657 adev->mman.fw_vram_usage_size > vram_size)
1660 return amdgpu_bo_create_kernel_at(adev,
1661 adev->mman.fw_vram_usage_start_offset,
1662 adev->mman.fw_vram_usage_size,
1663 &adev->mman.fw_vram_usage_reserved_bo,
1664 &adev->mman.fw_vram_usage_va);
1668 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
1670 * @adev: amdgpu_device pointer
1672 * create bo vram reservation from drv.
1674 static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
1676 u64 vram_size = adev->gmc.visible_vram_size;
1678 adev->mman.drv_vram_usage_va = NULL;
1679 adev->mman.drv_vram_usage_reserved_bo = NULL;
1681 if (adev->mman.drv_vram_usage_size == 0 ||
1682 adev->mman.drv_vram_usage_size > vram_size)
1685 return amdgpu_bo_create_kernel_at(adev,
1686 adev->mman.drv_vram_usage_start_offset,
1687 adev->mman.drv_vram_usage_size,
1688 &adev->mman.drv_vram_usage_reserved_bo,
1689 &adev->mman.drv_vram_usage_va);
1693 * Memory training reservation functions
1697 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1699 * @adev: amdgpu_device pointer
1701 * free memory training reserved vram if it has been reserved.
1703 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1705 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1707 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1708 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1714 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
1715 uint32_t reserve_size)
1717 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1719 memset(ctx, 0, sizeof(*ctx));
1721 ctx->c2p_train_data_offset =
1722 ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1723 ctx->p2c_train_data_offset =
1724 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1725 ctx->train_data_size =
1726 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1728 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1729 ctx->train_data_size,
1730 ctx->p2c_train_data_offset,
1731 ctx->c2p_train_data_offset);
1735 * reserve TMR memory at the top of VRAM which holds
1736 * IP Discovery data and is protected by PSP.
1738 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1740 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1741 bool mem_train_support = false;
1742 uint32_t reserve_size = 0;
1745 if (adev->bios && !amdgpu_sriov_vf(adev)) {
1746 if (amdgpu_atomfirmware_mem_training_supported(adev))
1747 mem_train_support = true;
1749 DRM_DEBUG("memory training is not supported!\n");
1753 * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1754 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1756 * Otherwise, fall back to the legacy approach of checking and reserving a TMR block for IP
1757 * discovery data and G6 memory training data respectively.
1761 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1764 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1765 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1766 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
1767 reserve_size = max(reserve_size, (uint32_t)280 << 20);
1768 else if (!reserve_size)
1769 reserve_size = DISCOVERY_TMR_OFFSET;
1771 if (mem_train_support) {
1772 /* reserve vram for mem train according to TMR location */
1773 amdgpu_ttm_training_data_block_init(adev, reserve_size);
1774 ret = amdgpu_bo_create_kernel_at(adev,
1775 ctx->c2p_train_data_offset,
1776 ctx->train_data_size,
1780 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1781 amdgpu_ttm_training_reserve_vram_fini(adev);
1784 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1787 if (!adev->gmc.is_app_apu) {
1788 ret = amdgpu_bo_create_kernel_at(
1789 adev, adev->gmc.real_vram_size - reserve_size,
1790 reserve_size, &adev->mman.fw_reserved_memory, NULL);
1792 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1793 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
1798 DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
1804 static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
1808 if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
1811 adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
1812 sizeof(*adev->mman.ttm_pools),
1814 if (!adev->mman.ttm_pools)
1817 for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
1818 ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
1819 adev->gmc.mem_partitions[i].numa.node,
1825 static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
1829 if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
1832 for (i = 0; i < adev->gmc.num_mem_partitions; i++)
1833 ttm_pool_fini(&adev->mman.ttm_pools[i]);
1835 kfree(adev->mman.ttm_pools);
1836 adev->mman.ttm_pools = NULL;
1840 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1841 * gtt/vram related fields.
1843 * This initializes all of the memory space pools that the TTM layer
1844 * will need such as the GTT space (system memory mapped to the device),
1845 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1846 * can be mapped per VMID.
1848 int amdgpu_ttm_init(struct amdgpu_device *adev)
1853 mutex_init(&adev->mman.gtt_window_lock);
1855 dma_set_max_seg_size(adev->dev, UINT_MAX);
1856 /* No other users of the address space, so set it to 0 */
1857 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1858 adev_to_drm(adev)->anon_inode->i_mapping,
1859 adev_to_drm(adev)->vma_offset_manager,
1861 dma_addressing_limited(adev->dev));
1863 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1867 r = amdgpu_ttm_pools_init(adev);
1869 DRM_ERROR("failed to init ttm pools(%d).\n", r);
1872 adev->mman.initialized = true;
1874 /* Initialize VRAM pool with all of VRAM divided into pages */
1875 r = amdgpu_vram_mgr_init(adev);
1877 DRM_ERROR("Failed initializing VRAM heap.\n");
1881 /* Change the size here instead of the init above so only lpfn is affected */
1882 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1885 if (adev->gmc.xgmi.connected_to_cpu)
1886 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1887 adev->gmc.visible_vram_size);
1889 else if (adev->gmc.is_app_apu)
1891 "No need to ioremap when real vram size is 0\n");
1894 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1895 adev->gmc.visible_vram_size);
1899 * The reserved VRAM for firmware must be pinned to the specified
1900 * place in VRAM, so reserve it early.
1902 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1907 * The reserved VRAM for the driver must be pinned to the specified
1908 * place in VRAM, so reserve it early.
1910 r = amdgpu_ttm_drv_reserve_vram_init(adev);
1915 * only NAVI10 and onward ASICs support IP discovery.
1916 * If IP discovery is enabled, a block of memory should be
1917 * reserved for IP discovery.
1919 if (adev->mman.discovery_bin) {
1920 r = amdgpu_ttm_reserve_tmr(adev);
1925 /* allocate memory as required for VGA
1926 * This is used for VGA emulation and pre-OS scanout buffers to
1927 * avoid display artifacts while transitioning between pre-OS
1930 if (!adev->gmc.is_app_apu) {
1931 r = amdgpu_bo_create_kernel_at(adev, 0,
1932 adev->mman.stolen_vga_size,
1933 &adev->mman.stolen_vga_memory,
1938 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1939 adev->mman.stolen_extended_size,
1940 &adev->mman.stolen_extended_memory,
1946 r = amdgpu_bo_create_kernel_at(adev,
1947 adev->mman.stolen_reserved_offset,
1948 adev->mman.stolen_reserved_size,
1949 &adev->mman.stolen_reserved_memory,
1954 DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
1957 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1958 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
1960 /* Compute GTT size, either based on TTM limit
1961 * or whatever the user passed on module init.
1963 if (amdgpu_gtt_size == -1)
1964 gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
1966 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1968 /* Initialize GTT memory pool */
1969 r = amdgpu_gtt_mgr_init(adev, gtt_size);
1971 DRM_ERROR("Failed initializing GTT heap.\n");
1974 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1975 (unsigned int)(gtt_size / (1024 * 1024)));
1977 /* Initialize doorbell pool on PCI BAR */
1978 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
1980 DRM_ERROR("Failed initializing doorbell heap.\n");
1984 /* Create a doorbell page for kernel use */
1985 r = amdgpu_doorbell_create_kernel_doorbells(adev);
1987 DRM_ERROR("Failed to initialize kernel doorbells.\n");
1991 /* Initialize preemptible memory pool */
1992 r = amdgpu_preempt_mgr_init(adev);
1994 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1998 /* Initialize various on-chip memory pools */
1999 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
2001 DRM_ERROR("Failed initializing GDS heap.\n");
2005 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
2007 DRM_ERROR("Failed initializing gws heap.\n");
2011 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
2013 DRM_ERROR("Failed initializing oa heap.\n");
2016 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
2017 AMDGPU_GEM_DOMAIN_GTT,
2018 &adev->mman.sdma_access_bo, NULL,
2019 &adev->mman.sdma_access_ptr))
2020 DRM_WARN("Debug VRAM access will use slowpath MM access\n");
2026 * amdgpu_ttm_fini - De-initialize the TTM memory pools
2028 void amdgpu_ttm_fini(struct amdgpu_device *adev)
2032 if (!adev->mman.initialized)
2035 amdgpu_ttm_pools_fini(adev);
2037 amdgpu_ttm_training_reserve_vram_fini(adev);
2038 /* return the stolen vga memory back to VRAM */
2039 if (!adev->gmc.is_app_apu) {
2040 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
2041 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
2042 /* return the FW reserved memory back to VRAM */
2043 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
2045 if (adev->mman.stolen_reserved_size)
2046 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
2049 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
2050 &adev->mman.sdma_access_ptr);
2051 amdgpu_ttm_fw_reserve_vram_fini(adev);
2052 amdgpu_ttm_drv_reserve_vram_fini(adev);
2054 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
2056 if (adev->mman.aper_base_kaddr)
2057 iounmap(adev->mman.aper_base_kaddr);
2058 adev->mman.aper_base_kaddr = NULL;
2063 amdgpu_vram_mgr_fini(adev);
2064 amdgpu_gtt_mgr_fini(adev);
2065 amdgpu_preempt_mgr_fini(adev);
2066 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2067 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2068 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2069 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
2070 ttm_device_fini(&adev->mman.bdev);
2071 adev->mman.initialized = false;
2072 DRM_INFO("amdgpu: ttm finalized\n");
2076 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2078 * @adev: amdgpu_device pointer
2079 * @enable: true when we can use buffer functions.
2081 * Enable/disable use of buffer functions during suspend/resume. This should
2082 * only be called at bootup or when userspace isn't running.
2084 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2086 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2090 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2091 adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
2095 struct amdgpu_ring *ring;
2096 struct drm_gpu_scheduler *sched;
2098 ring = adev->mman.buffer_funcs_ring;
2099 sched = &ring->sched;
2100 r = drm_sched_entity_init(&adev->mman.high_pr,
2101 DRM_SCHED_PRIORITY_KERNEL, &sched,
2104 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2109 r = drm_sched_entity_init(&adev->mman.low_pr,
2110 DRM_SCHED_PRIORITY_NORMAL, &sched,
2113 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2115 goto error_free_entity;
2118 drm_sched_entity_destroy(&adev->mman.high_pr);
2119 drm_sched_entity_destroy(&adev->mman.low_pr);
2120 dma_fence_put(man->move);
2124 /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
2126 size = adev->gmc.real_vram_size;
2128 size = adev->gmc.visible_vram_size;
2130 adev->mman.buffer_funcs_enabled = enable;
2135 drm_sched_entity_destroy(&adev->mman.high_pr);
2138 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
2140 unsigned int num_dw,
2141 struct dma_resv *resv,
2142 bool vm_needs_flush,
2143 struct amdgpu_job **job,
2146 enum amdgpu_ib_pool_type pool = direct_submit ?
2147 AMDGPU_IB_POOL_DIRECT :
2148 AMDGPU_IB_POOL_DELAYED;
2150 struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
2151 &adev->mman.high_pr;
2152 r = amdgpu_job_alloc_with_ib(adev, entity,
2153 AMDGPU_FENCE_OWNER_UNDEFINED,
2154 num_dw * 4, pool, job);
2158 if (vm_needs_flush) {
2159 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2162 (*job)->vm_needs_flush = true;
2167 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2168 DMA_RESV_USAGE_BOOKKEEP);
2171 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2172 uint64_t dst_offset, uint32_t byte_count,
2173 struct dma_resv *resv,
2174 struct dma_fence **fence, bool direct_submit,
2175 bool vm_needs_flush, uint32_t copy_flags)
2177 struct amdgpu_device *adev = ring->adev;
2178 unsigned int num_loops, num_dw;
2179 struct amdgpu_job *job;
2184 if (!direct_submit && !ring->sched.ready) {
2185 DRM_ERROR("Trying to move memory with ring turned off.\n");
2189 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2190 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2191 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2192 r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
2193 resv, vm_needs_flush, &job, false);
2197 for (i = 0; i < num_loops; i++) {
2198 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2200 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2201 dst_offset, cur_size_in_bytes, copy_flags);
2202 src_offset += cur_size_in_bytes;
2203 dst_offset += cur_size_in_bytes;
2204 byte_count -= cur_size_in_bytes;
2207 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2208 WARN_ON(job->ibs[0].length_dw > num_dw);
2210 r = amdgpu_job_submit_direct(job, ring, fence);
2212 *fence = amdgpu_job_submit(job);
2219 amdgpu_job_free(job);
2220 DRM_ERROR("Error scheduling IBs (%d)\n", r);
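/*
 * Illustrative sketch (not part of the driver): a minimal synchronous caller
 * of amdgpu_copy_buffer() on the buffer functions ring. example_sync_copy()
 * is a hypothetical helper; no reservation object is synced and no VM flush
 * is requested.
 */
#if 0
static int example_sync_copy(struct amdgpu_device *adev, uint64_t src,
			     uint64_t dst, uint32_t bytes)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_copy_buffer(ring, src, dst, bytes, NULL, &fence,
			       false, false, 0);
	if (r)
		return r;

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}
#endif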
2224 static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
2225 uint64_t dst_addr, uint32_t byte_count,
2226 struct dma_resv *resv,
2227 struct dma_fence **fence,
2228 bool vm_needs_flush, bool delayed)
2230 struct amdgpu_device *adev = ring->adev;
2231 unsigned int num_loops, num_dw;
2232 struct amdgpu_job *job;
2237 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2238 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2239 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2240 r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2245 for (i = 0; i < num_loops; i++) {
2246 uint32_t cur_size = min(byte_count, max_bytes);
2248 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2251 dst_addr += cur_size;
2252 byte_count -= cur_size;
2255 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2256 WARN_ON(job->ibs[0].length_dw > num_dw);
2257 *fence = amdgpu_job_submit(job);
2262 * amdgpu_ttm_clear_buffer - clear memory buffers
2263 * @bo: amdgpu buffer object
2264 * @resv: reservation object
2265 * @fence: dma_fence associated with the operation
2267 * Clear the memory buffer resource.
2270 * 0 for success or a negative error code on failure.
2272 int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
2273 struct dma_resv *resv,
2274 struct dma_fence **fence)
2276 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2277 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2278 struct amdgpu_res_cursor cursor;
2282 if (!adev->mman.buffer_funcs_enabled)
2288 *fence = dma_fence_get_stub();
2290 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
2292 mutex_lock(&adev->mman.gtt_window_lock);
2293 while (cursor.remaining) {
2294 struct dma_fence *next = NULL;
2297 if (amdgpu_res_cleared(&cursor)) {
2298 amdgpu_res_next(&cursor, cursor.size);
2302 /* Never clear more than 256MiB at once to avoid timeouts */
2303 size = min(cursor.size, 256ULL << 20);
2305 r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
2306 1, ring, false, &size, &addr);
2310 r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
2315 dma_fence_put(*fence);
2318 amdgpu_res_next(&cursor, size);
2321 mutex_unlock(&adev->mman.gtt_window_lock);
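/*
 * Example (illustrative sketch only, not part of the driver): a caller
 * that already holds the BO's reservation could clear a buffer and wait
 * for the SDMA work roughly like this; error handling is trimmed and the
 * surrounding context is assumed.
 *
 *	struct dma_fence *fence = NULL;
 *
 *	if (!amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence) && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */

/*
 * amdgpu_fill_buffer - fill a BO's backing resource with @src_data
 * (editorial summary)
 *
 * Unlike amdgpu_ttm_clear_buffer() above, this path does not skip ranges
 * already reported as cleared; it walks the whole resource with a cursor
 * and fills it in chunks of at most 256MiB per submission.
 */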
2326 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2328 struct dma_resv *resv,
2329 struct dma_fence **f,
2332 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2333 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2334 struct dma_fence *fence = NULL;
2335 struct amdgpu_res_cursor dst;
2338 if (!adev->mman.buffer_funcs_enabled) {
2339 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2343 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2345 mutex_lock(&adev->mman.gtt_window_lock);
2346 while (dst.remaining) {
2347 struct dma_fence *next;
2348 uint64_t cur_size, to;
2350 /* Never fill more than 256MiB at once to avoid timeouts */
2351 cur_size = min(dst.size, 256ULL << 20);
2353 r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2354 1, ring, false, &cur_size, &to);
2358 r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
2359 &next, true, delayed);
2363 dma_fence_put(fence);
2366 amdgpu_res_next(&dst, cur_size);
2369 mutex_unlock(&adev->mman.gtt_window_lock);
2371 *f = dma_fence_get(fence);
2372 dma_fence_put(fence);
2377 * amdgpu_ttm_evict_resources - evict memory buffers
2378 * @adev: amdgpu device object
2379 * @mem_type: evicted BO's memory type
2381 * Evicts all @mem_type buffers on the lru list of the memory type.
*
* Returns:
2384 * 0 for success or a negative error code on failure.
2386 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2388 struct ttm_resource_manager *man;
2396 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2399 DRM_ERROR("Trying to evict invalid memory type\n");
2403 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
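/*
 * Example (illustrative sketch only): evicting everything out of VRAM,
 * as the suspend path conceptually does, could look like this; the error
 * handling shown is hypothetical.
 *
 *	int r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
 *
 *	if (r)
 *		DRM_WARN("Failed to evict VRAM buffers (%d)\n", r);
 */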
2406 #if defined(CONFIG_DEBUG_FS)
2408 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2410 struct amdgpu_device *adev = m->private;
2412 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2415 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2418 * amdgpu_ttm_vram_read - Linear read access to VRAM
2420 * Accesses VRAM via MMIO for debugging purposes.
2422 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2423 size_t size, loff_t *pos)
2425 struct amdgpu_device *adev = file_inode(f)->i_private;
2428 if (size & 0x3 || *pos & 0x3)
2431 if (*pos >= adev->gmc.mc_vram_size)
2434 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2436 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2437 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2439 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2440 if (copy_to_user(buf, value, bytes))
2453 * amdgpu_ttm_vram_write - Linear write access to VRAM
2455 * Accesses VRAM via MMIO for debugging purposes.
2457 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2458 size_t size, loff_t *pos)
2460 struct amdgpu_device *adev = file_inode(f)->i_private;
2464 if (size & 0x3 || *pos & 0x3)
2467 if (*pos >= adev->gmc.mc_vram_size)
2473 if (*pos >= adev->gmc.mc_vram_size)
2476 r = get_user(value, (uint32_t *)buf);
2480 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2491 static const struct file_operations amdgpu_ttm_vram_fops = {
2492 .owner = THIS_MODULE,
2493 .read = amdgpu_ttm_vram_read,
2494 .write = amdgpu_ttm_vram_write,
2495 .llseek = default_llseek,
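/*
 * Example (userspace sketch, not part of the driver): the "amdgpu_vram"
 * file registered in amdgpu_ttm_debugfs_init() below accepts only 4-byte
 * aligned offsets and sizes, so a single dword at VRAM offset 0x1000 could
 * be read like this, assuming debugfs is mounted at /sys/kernel/debug and
 * the device is DRM minor 0:
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);
 *	uint32_t dword;
 *
 *	if (fd >= 0 && pread(fd, &dword, sizeof(dword), 0x1000) == sizeof(dword))
 *		printf("VRAM[0x1000] = 0x%08x\n", dword);
 */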
2499 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2501 * This function is used to read memory that has been mapped to the
2502 * GPU, where the addresses are not physical addresses but bus
2503 * addresses (e.g., what you'd put in an IB or ring buffer).
2505 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2506 size_t size, loff_t *pos)
2508 struct amdgpu_device *adev = file_inode(f)->i_private;
2509 struct iommu_domain *dom;
2513 /* retrieve the IOMMU domain if any for this device */
2514 dom = iommu_get_domain_for_dev(adev->dev);
2517 phys_addr_t addr = *pos & PAGE_MASK;
2518 loff_t off = *pos & ~PAGE_MASK;
2519 size_t bytes = PAGE_SIZE - off;
2524 bytes = min(bytes, size);
2526 /* Translate the bus address to a physical address. If
2527 * the domain is NULL it means there is no IOMMU active
2528 * and the address translation is the identity
2530 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2532 pfn = addr >> PAGE_SHIFT;
2533 if (!pfn_valid(pfn))
2536 p = pfn_to_page(pfn);
2537 if (p->mapping != adev->mman.bdev.dev_mapping)
2540 ptr = kmap_local_page(p);
2541 r = copy_to_user(buf, ptr + off, bytes);
2555 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2557 * This function is used to write memory that has been mapped to the
2558 * GPU, where the addresses are not physical addresses but bus
2559 * addresses (e.g., what you'd put in an IB or ring buffer).
2561 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2562 size_t size, loff_t *pos)
2564 struct amdgpu_device *adev = file_inode(f)->i_private;
2565 struct iommu_domain *dom;
2569 dom = iommu_get_domain_for_dev(adev->dev);
2572 phys_addr_t addr = *pos & PAGE_MASK;
2573 loff_t off = *pos & ~PAGE_MASK;
2574 size_t bytes = PAGE_SIZE - off;
2579 bytes = min(bytes, size);
2581 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2583 pfn = addr >> PAGE_SHIFT;
2584 if (!pfn_valid(pfn))
2587 p = pfn_to_page(pfn);
2588 if (p->mapping != adev->mman.bdev.dev_mapping)
2591 ptr = kmap_local_page(p);
2592 r = copy_from_user(ptr + off, buf, bytes);
2605 static const struct file_operations amdgpu_ttm_iomem_fops = {
2606 .owner = THIS_MODULE,
2607 .read = amdgpu_iomem_read,
2608 .write = amdgpu_iomem_write,
2609 .llseek = default_llseek
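/*
 * Example (userspace sketch, not part of the driver): unlike
 * "amdgpu_vram", the "amdgpu_iomem" file is addressed by GPU bus (DMA)
 * address, so the offset passed to pread()/pwrite() is an address as the
 * GPU sees it, e.g. one taken from an IB or ring buffer; the value below
 * is purely hypothetical.
 *
 *	uint64_t gpu_addr = 0x100000000ULL;
 *	uint32_t dword;
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_iomem", O_RDONLY);
 *
 *	if (fd >= 0 && pread(fd, &dword, sizeof(dword), gpu_addr) == sizeof(dword))
 *		printf("0x%llx -> 0x%08x\n", (unsigned long long)gpu_addr, dword);
 */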
2614 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2616 #if defined(CONFIG_DEBUG_FS)
2617 struct drm_minor *minor = adev_to_drm(adev)->primary;
2618 struct dentry *root = minor->debugfs_root;
2620 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2621 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2622 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2623 &amdgpu_ttm_iomem_fops);
2624 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2625 &amdgpu_ttm_page_pool_fops);
2626 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
2628 root, "amdgpu_vram_mm");
2629 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT),
2631 root, "amdgpu_gtt_mm");
2632 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GDS),
2634 root, "amdgpu_gds_mm");
2635 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GWS),
2637 root, "amdgpu_gws_mm");
2638 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_OA),
2640 root, "amdgpu_oa_mm");