[linux.git] drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c (blob at commit "drm/ttm: cleanup BO size handling v3")
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <[email protected]>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/hmm.h>
36 #include <linux/pagemap.h>
37 #include <linux/sched/task.h>
38 #include <linux/sched/mm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swap.h>
42 #include <linux/swiotlb.h>
43 #include <linux/dma-buf.h>
44 #include <linux/sizes.h>
45
46 #include <drm/ttm/ttm_bo_api.h>
47 #include <drm/ttm/ttm_bo_driver.h>
48 #include <drm/ttm/ttm_placement.h>
49
50 #include <drm/drm_debugfs.h>
51 #include <drm/amdgpu_drm.h>
52
53 #include "amdgpu.h"
54 #include "amdgpu_object.h"
55 #include "amdgpu_trace.h"
56 #include "amdgpu_amdkfd.h"
57 #include "amdgpu_sdma.h"
58 #include "amdgpu_ras.h"
59 #include "amdgpu_atomfirmware.h"
60 #include "bif/bif_4_1_d.h"
61
62 #define AMDGPU_TTM_VRAM_MAX_DW_READ     (size_t)128
63
64 static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
65                                    struct ttm_tt *ttm,
66                                    struct ttm_resource *bo_mem);
67 static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
68                                       struct ttm_tt *ttm);
69
70 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
71                                     unsigned int type,
72                                     uint64_t size_in_page)
73 {
74         return ttm_range_man_init(&adev->mman.bdev, type,
75                                   false, size_in_page);
76 }
77
78 /**
79  * amdgpu_evict_flags - Compute placement flags
80  *
81  * @bo: The buffer object to evict
82  * @placement: Possible destination(s) for evicted BO
83  *
84  * Fill in placement data when ttm_bo_evict() is called
85  */
86 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
87                                 struct ttm_placement *placement)
88 {
89         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
90         struct amdgpu_bo *abo;
91         static const struct ttm_place placements = {
92                 .fpfn = 0,
93                 .lpfn = 0,
94                 .mem_type = TTM_PL_SYSTEM,
95                 .flags = 0
96         };
97
98         /* Don't handle scatter gather BOs */
99         if (bo->type == ttm_bo_type_sg) {
100                 placement->num_placement = 0;
101                 placement->num_busy_placement = 0;
102                 return;
103         }
104
105         /* Object isn't an AMDGPU object so ignore */
106         if (!amdgpu_bo_is_amdgpu_bo(bo)) {
107                 placement->placement = &placements;
108                 placement->busy_placement = &placements;
109                 placement->num_placement = 1;
110                 placement->num_busy_placement = 1;
111                 return;
112         }
113
114         abo = ttm_to_amdgpu_bo(bo);
115         switch (bo->mem.mem_type) {
116         case AMDGPU_PL_GDS:
117         case AMDGPU_PL_GWS:
118         case AMDGPU_PL_OA:
119                 placement->num_placement = 0;
120                 placement->num_busy_placement = 0;
121                 return;
122
123         case TTM_PL_VRAM:
124                 if (!adev->mman.buffer_funcs_enabled) {
125                         /* Move to system memory */
126                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
127                 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
128                            !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
129                            amdgpu_bo_in_cpu_visible_vram(abo)) {
130
131                         /* Try evicting to the CPU inaccessible part of VRAM
132                          * first, but only set GTT as busy placement, so this
133                          * BO will be evicted to GTT rather than causing other
134                          * BOs to be evicted from VRAM
135                          */
136                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
137                                                          AMDGPU_GEM_DOMAIN_GTT);
138                         abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
139                         abo->placements[0].lpfn = 0;
140                         abo->placement.busy_placement = &abo->placements[1];
141                         abo->placement.num_busy_placement = 1;
142                 } else {
143                         /* Move to GTT memory */
144                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
145                 }
146                 break;
147         case TTM_PL_TT:
148         default:
149                 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
150                 break;
151         }
152         *placement = abo->placement;
153 }
154
155 /**
156  * amdgpu_verify_access - Verify access for a mmap call
157  *
158  * @bo: The buffer object to map
159  * @filp: The file pointer from the process performing the mmap
160  *
161  * This is called by ttm_bo_mmap() to verify whether a process
162  * has the right to mmap a BO to their process space.
163  */
164 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
165 {
166         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
167
168         /*
169          * Don't verify access for KFD BOs. They don't have a GEM
170          * object associated with them.
171          */
172         if (abo->kfd_bo)
173                 return 0;
174
175         if (amdgpu_ttm_tt_get_usermm(bo->ttm))
176                 return -EPERM;
177         return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
178                                           filp->private_data);
179 }
180
181 /**
182  * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
183  *
184  * @bo: The bo to assign the memory to.
185  * @mm_node: Memory manager node for drm allocator.
186  * @mem: The region where the bo resides.
187  *
188  */
189 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
190                                     struct drm_mm_node *mm_node,
191                                     struct ttm_resource *mem)
192 {
193         uint64_t addr = 0;
194
195         if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
196                 addr = mm_node->start << PAGE_SHIFT;
197                 addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
198                                                 mem->mem_type);
199         }
200         return addr;
201 }
202
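/*
 * Illustrative note (not part of the driver): for a node placed at page 0x100
 * of its domain, amdgpu_mm_node_addr() returns
 *
 *   addr = (0x100 << PAGE_SHIFT) + amdgpu_ttm_domain_start(adev, mem->mem_type);
 *
 * i.e. the node's byte offset inside the domain plus the domain's GPU start
 * address (gart_start for TTM_PL_TT, vram_start for TTM_PL_VRAM). Nodes at
 * AMDGPU_BO_INVALID_OFFSET have no GPU address and yield 0.
 */
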
203 /**
204  * amdgpu_find_mm_node - Helper function that finds the drm_mm_node corresponding
205  * to @offset. It also adjusts @offset to be relative to the returned drm_mm_node.
206  *
207  * @mem: The region where the bo resides.
208  * @offset: The offset to look up; updated to lie within the returned drm_mm_node.
209  *
210  */
211 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
212                                                uint64_t *offset)
213 {
214         struct drm_mm_node *mm_node = mem->mm_node;
215
216         while (*offset >= (mm_node->size << PAGE_SHIFT)) {
217                 *offset -= (mm_node->size << PAGE_SHIFT);
218                 ++mm_node;
219         }
220         return mm_node;
221 }
222
223 /**
224  * amdgpu_ttm_map_buffer - Map memory into the GART windows
225  * @bo: buffer object to map
226  * @mem: memory object to map
227  * @mm_node: drm_mm node object to map
228  * @num_pages: number of pages to map
229  * @offset: offset into @mm_node where to start
230  * @window: which GART window to use
231  * @ring: DMA ring to use for the copy
232  * @tmz: if we should setup a TMZ enabled mapping
233  * @addr: resulting address inside the MC address space
234  *
235  * Setup one of the GART windows to access a specific piece of memory or return
236  * the physical address for local memory.
237  */
238 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
239                                  struct ttm_resource *mem,
240                                  struct drm_mm_node *mm_node,
241                                  unsigned num_pages, uint64_t offset,
242                                  unsigned window, struct amdgpu_ring *ring,
243                                  bool tmz, uint64_t *addr)
244 {
245         struct amdgpu_device *adev = ring->adev;
246         struct amdgpu_job *job;
247         unsigned num_dw, num_bytes;
248         struct dma_fence *fence;
249         uint64_t src_addr, dst_addr;
250         void *cpu_addr;
251         uint64_t flags;
252         unsigned int i;
253         int r;
254
255         BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
256                AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
257
258         /* Map only what can't be accessed directly */
259         if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
260                 *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
261                 return 0;
262         }
263
264         *addr = adev->gmc.gart_start;
265         *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
266                 AMDGPU_GPU_PAGE_SIZE;
267         *addr += offset & ~PAGE_MASK;
268
269         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
270         num_bytes = num_pages * 8;
271
272         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
273                                      AMDGPU_IB_POOL_DELAYED, &job);
274         if (r)
275                 return r;
276
277         src_addr = num_dw * 4;
278         src_addr += job->ibs[0].gpu_addr;
279
280         dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
281         dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
282         amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
283                                 dst_addr, num_bytes, false);
284
285         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
286         WARN_ON(job->ibs[0].length_dw > num_dw);
287
288         flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
289         if (tmz)
290                 flags |= AMDGPU_PTE_TMZ;
291
292         cpu_addr = &job->ibs[0].ptr[num_dw];
293
294         if (mem->mem_type == TTM_PL_TT) {
295                 dma_addr_t *dma_address;
296
297                 dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
298                 r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
299                                     cpu_addr);
300                 if (r)
301                         goto error_free;
302         } else {
303                 dma_addr_t dma_address;
304
305                 dma_address = (mm_node->start << PAGE_SHIFT) + offset;
306                 dma_address += adev->vm_manager.vram_base_offset;
307
308                 for (i = 0; i < num_pages; ++i) {
309                         r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
310                                             &dma_address, flags, cpu_addr);
311                         if (r)
312                                 goto error_free;
313
314                         dma_address += PAGE_SIZE;
315                 }
316         }
317
318         r = amdgpu_job_submit(job, &adev->mman.entity,
319                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
320         if (r)
321                 goto error_free;
322
323         dma_fence_put(fence);
324
325         return r;
326
327 error_free:
328         amdgpu_job_free(job);
329         return r;
330 }
331
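/*
 * Illustrative note (not part of the driver): when the memory cannot be
 * accessed directly, amdgpu_ttm_map_buffer() hands back an address inside one
 * of the GART windows, computed as
 *
 *   *addr = adev->gmc.gart_start
 *         + (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE
 *         + (offset & ~PAGE_MASK);
 *
 * so the two windows used by amdgpu_ttm_copy_mem_to_mem() below (0 for the
 * source, 1 for the destination) never overlap, and the sub-page part of
 * @offset is preserved.
 */
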
332 /**
333  * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
334  * @adev: amdgpu device
335  * @src: buffer/address where to read from
336  * @dst: buffer/address where to write to
337  * @size: number of bytes to copy
338  * @tmz: if a secure copy should be used
339  * @resv: resv object to sync to
340  * @f: Returns the last fence if multiple jobs are submitted.
341  *
342  * The function copies @size bytes from {src->mem + src->offset} to
343  * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
344  * move and different BOs for a BO to BO copy.
345  *
346  */
347 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
348                                const struct amdgpu_copy_mem *src,
349                                const struct amdgpu_copy_mem *dst,
350                                uint64_t size, bool tmz,
351                                struct dma_resv *resv,
352                                struct dma_fence **f)
353 {
354         const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
355                                         AMDGPU_GPU_PAGE_SIZE);
356
357         uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
358         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
359         struct drm_mm_node *src_mm, *dst_mm;
360         struct dma_fence *fence = NULL;
361         int r = 0;
362
363         if (!adev->mman.buffer_funcs_enabled) {
364                 DRM_ERROR("Trying to move memory with ring turned off.\n");
365                 return -EINVAL;
366         }
367
368         src_offset = src->offset;
369         if (src->mem->mm_node) {
370                 src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
371                 src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
372         } else {
373                 src_mm = NULL;
374                 src_node_size = ULLONG_MAX;
375         }
376
377         dst_offset = dst->offset;
378         if (dst->mem->mm_node) {
379                 dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
380                 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
381         } else {
382                 dst_mm = NULL;
383                 dst_node_size = ULLONG_MAX;
384         }
385
386         mutex_lock(&adev->mman.gtt_window_lock);
387
388         while (size) {
389                 uint32_t src_page_offset = src_offset & ~PAGE_MASK;
390                 uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
391                 struct dma_fence *next;
392                 uint32_t cur_size;
393                 uint64_t from, to;
394
395                 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
396                  * begins at an offset, then adjust the size accordingly
397                  */
398                 cur_size = max(src_page_offset, dst_page_offset);
399                 cur_size = min(min3(src_node_size, dst_node_size, size),
400                                (uint64_t)(GTT_MAX_BYTES - cur_size));
401
402                 /* Map src to window 0 and dst to window 1. */
403                 r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
404                                           PFN_UP(cur_size + src_page_offset),
405                                           src_offset, 0, ring, tmz, &from);
406                 if (r)
407                         goto error;
408
409                 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
410                                           PFN_UP(cur_size + dst_page_offset),
411                                           dst_offset, 1, ring, tmz, &to);
412                 if (r)
413                         goto error;
414
415                 r = amdgpu_copy_buffer(ring, from, to, cur_size,
416                                        resv, &next, false, true, tmz);
417                 if (r)
418                         goto error;
419
420                 dma_fence_put(fence);
421                 fence = next;
422
423                 size -= cur_size;
424                 if (!size)
425                         break;
426
427                 src_node_size -= cur_size;
428                 if (!src_node_size) {
429                         ++src_mm;
430                         src_node_size = src_mm->size << PAGE_SHIFT;
431                         src_offset = 0;
432                 } else {
433                         src_offset += cur_size;
434                 }
435
436                 dst_node_size -= cur_size;
437                 if (!dst_node_size) {
438                         ++dst_mm;
439                         dst_node_size = dst_mm->size << PAGE_SHIFT;
440                         dst_offset = 0;
441                 } else {
442                         dst_offset += cur_size;
443                 }
444         }
445 error:
446         mutex_unlock(&adev->mman.gtt_window_lock);
447         if (f)
448                 *f = dma_fence_get(fence);
449         dma_fence_put(fence);
450         return r;
451 }
452
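/*
 * Usage sketch (illustrative only, it simply mirrors amdgpu_move_blit() below):
 * to copy a whole buffer object, fill in two amdgpu_copy_mem descriptors and
 * let the helper split the work into GTT-window sized chunks:
 *
 *   struct amdgpu_copy_mem src = { .bo = bo, .mem = old_mem, .offset = 0 };
 *   struct amdgpu_copy_mem dst = { .bo = bo, .mem = new_mem, .offset = 0 };
 *   struct dma_fence *fence = NULL;
 *
 *   r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 *                                  new_mem->num_pages << PAGE_SHIFT,
 *                                  false, bo->base.resv, &fence);  // tmz = false
 *
 * The returned fence, if any, signals completion of the last submitted job.
 */
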
453 /**
454  * amdgpu_move_blit - Copy an entire buffer to another buffer
455  *
456  * This is a helper called by amdgpu_bo_move() to help move buffers to and
457  * from VRAM.
458  */
459 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
460                             bool evict,
461                             struct ttm_resource *new_mem,
462                             struct ttm_resource *old_mem)
463 {
464         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
465         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
466         struct amdgpu_copy_mem src, dst;
467         struct dma_fence *fence = NULL;
468         int r;
469
470         src.bo = bo;
471         dst.bo = bo;
472         src.mem = old_mem;
473         dst.mem = new_mem;
474         src.offset = 0;
475         dst.offset = 0;
476
477         r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
478                                        new_mem->num_pages << PAGE_SHIFT,
479                                        amdgpu_bo_encrypted(abo),
480                                        bo->base.resv, &fence);
481         if (r)
482                 goto error;
483
484         /* clear the space being freed */
485         if (old_mem->mem_type == TTM_PL_VRAM &&
486             (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
487                 struct dma_fence *wipe_fence = NULL;
488
489                 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
490                                        NULL, &wipe_fence);
491                 if (r) {
492                         goto error;
493                 } else if (wipe_fence) {
494                         dma_fence_put(fence);
495                         fence = wipe_fence;
496                 }
497         }
498
499         /* Always block for VM page tables before committing the new location */
500         if (bo->type == ttm_bo_type_kernel)
501                 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
502         else
503                 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
504         dma_fence_put(fence);
505         return r;
506
507 error:
508         if (fence)
509                 dma_fence_wait(fence, false);
510         dma_fence_put(fence);
511         return r;
512 }
513
514 /**
515  * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
516  *
517  * Called by amdgpu_bo_move()
518  */
519 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
520                                struct ttm_resource *mem)
521 {
522         struct drm_mm_node *nodes = mem->mm_node;
523
524         if (mem->mem_type == TTM_PL_SYSTEM ||
525             mem->mem_type == TTM_PL_TT)
526                 return true;
527         if (mem->mem_type != TTM_PL_VRAM)
528                 return false;
529
530         /* ttm_resource_ioremap only supports contiguous memory */
531         if (nodes->size != mem->num_pages)
532                 return false;
533
534         return ((nodes->start + nodes->size) << PAGE_SHIFT)
535                 <= adev->gmc.visible_vram_size;
536 }
537
538 /**
539  * amdgpu_bo_move - Move a buffer object to a new memory location
540  *
541  * Called by ttm_bo_handle_move_mem()
542  */
543 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
544                           struct ttm_operation_ctx *ctx,
545                           struct ttm_resource *new_mem,
546                           struct ttm_place *hop)
547 {
548         struct amdgpu_device *adev;
549         struct amdgpu_bo *abo;
550         struct ttm_resource *old_mem = &bo->mem;
551         int r;
552
553         if (new_mem->mem_type == TTM_PL_TT) {
554                 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
555                 if (r)
556                         return r;
557         }
558
559         /* Can't move a pinned BO */
560         abo = ttm_to_amdgpu_bo(bo);
561         if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
562                 return -EINVAL;
563
564         adev = amdgpu_ttm_adev(bo->bdev);
565
566         if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
567                 ttm_bo_move_null(bo, new_mem);
568                 goto out;
569         }
570         if (old_mem->mem_type == TTM_PL_SYSTEM &&
571             new_mem->mem_type == TTM_PL_TT) {
572                 ttm_bo_move_null(bo, new_mem);
573                 goto out;
574         }
575         if (old_mem->mem_type == TTM_PL_TT &&
576             new_mem->mem_type == TTM_PL_SYSTEM) {
577                 r = ttm_bo_wait_ctx(bo, ctx);
578                 if (r)
579                         return r;
580
581                 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
582                 ttm_resource_free(bo, &bo->mem);
583                 ttm_bo_assign_mem(bo, new_mem);
584                 goto out;
585         }
586
587         if (old_mem->mem_type == AMDGPU_PL_GDS ||
588             old_mem->mem_type == AMDGPU_PL_GWS ||
589             old_mem->mem_type == AMDGPU_PL_OA ||
590             new_mem->mem_type == AMDGPU_PL_GDS ||
591             new_mem->mem_type == AMDGPU_PL_GWS ||
592             new_mem->mem_type == AMDGPU_PL_OA) {
593                 /* Nothing to save here */
594                 ttm_bo_move_null(bo, new_mem);
595                 goto out;
596         }
597
598         if (adev->mman.buffer_funcs_enabled) {
599                 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
600                       new_mem->mem_type == TTM_PL_VRAM) ||
601                      (old_mem->mem_type == TTM_PL_VRAM &&
602                       new_mem->mem_type == TTM_PL_SYSTEM))) {
603                         hop->fpfn = 0;
604                         hop->lpfn = 0;
605                         hop->mem_type = TTM_PL_TT;
606                         hop->flags = 0;
607                         return -EMULTIHOP;
608                 }
609
610                 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
611         } else {
612                 r = -ENODEV;
613         }
614
615         if (r) {
616                 /* Check that all memory is CPU accessible */
617                 if (!amdgpu_mem_visible(adev, old_mem) ||
618                     !amdgpu_mem_visible(adev, new_mem)) {
619                         pr_err("Move buffer fallback to memcpy unavailable\n");
620                         return r;
621                 }
622
623                 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
624                 if (r)
625                         return r;
626         }
627
628         if (bo->type == ttm_bo_type_device &&
629             new_mem->mem_type == TTM_PL_VRAM &&
630             old_mem->mem_type != TTM_PL_VRAM) {
631                 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
632                  * accesses the BO after it's moved.
633                  */
634                 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
635         }
636
637 out:
638         /* update statistics */
639         atomic64_add(bo->base.size, &adev->num_bytes_moved);
640         amdgpu_bo_move_notify(bo, evict, new_mem);
641         return 0;
642 }
643
644 /**
645  * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
646  *
647  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
648  */
649 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
650 {
651         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
652         struct drm_mm_node *mm_node = mem->mm_node;
653         size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
654
655         switch (mem->mem_type) {
656         case TTM_PL_SYSTEM:
657                 /* system memory */
658                 return 0;
659         case TTM_PL_TT:
660                 break;
661         case TTM_PL_VRAM:
662                 mem->bus.offset = mem->start << PAGE_SHIFT;
663                 /* check if it's visible */
664                 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
665                         return -EINVAL;
666                 /* Only physically contiguous buffers apply. In a contiguous
667                  * buffer, size of the first mm_node would match the number of
668                  * pages in ttm_resource.
669                  */
670                 if (adev->mman.aper_base_kaddr &&
671                     (mm_node->size == mem->num_pages))
672                         mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
673                                         mem->bus.offset;
674
675                 mem->bus.offset += adev->gmc.aper_base;
676                 mem->bus.is_iomem = true;
677                 mem->bus.caching = ttm_write_combined;
678                 break;
679         default:
680                 return -EINVAL;
681         }
682         return 0;
683 }
684
685 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
686                                            unsigned long page_offset)
687 {
688         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
689         uint64_t offset = (page_offset << PAGE_SHIFT);
690         struct drm_mm_node *mm;
691
692         mm = amdgpu_find_mm_node(&bo->mem, &offset);
693         offset += adev->gmc.aper_base;
694         return mm->start + (offset >> PAGE_SHIFT);
695 }
696
697 /**
698  * amdgpu_ttm_domain_start - Returns GPU start address
699  * @adev: amdgpu device object
700  * @type: type of the memory
701  *
702  * Returns:
703  * GPU start address of a memory domain
704  */
706 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
707 {
708         switch (type) {
709         case TTM_PL_TT:
710                 return adev->gmc.gart_start;
711         case TTM_PL_VRAM:
712                 return adev->gmc.vram_start;
713         }
714
715         return 0;
716 }
717
718 /*
719  * TTM backend functions.
720  */
721 struct amdgpu_ttm_tt {
722         struct ttm_tt   ttm;
723         struct drm_gem_object   *gobj;
724         u64                     offset;
725         uint64_t                userptr;
726         struct task_struct      *usertask;
727         uint32_t                userflags;
728         bool                    bound;
729 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
730         struct hmm_range        *range;
731 #endif
732 };
733
734 #ifdef CONFIG_DRM_AMDGPU_USERPTR
735 /**
736  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
737  * memory and start HMM tracking of CPU page table updates
738  *
739  * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
740  * only once afterwards to stop HMM tracking
741  */
742 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
743 {
744         struct ttm_tt *ttm = bo->tbo.ttm;
745         struct amdgpu_ttm_tt *gtt = (void *)ttm;
746         unsigned long start = gtt->userptr;
747         struct vm_area_struct *vma;
748         struct hmm_range *range;
749         unsigned long timeout;
750         struct mm_struct *mm;
751         unsigned long i;
752         int r = 0;
753
754         mm = bo->notifier.mm;
755         if (unlikely(!mm)) {
756                 DRM_DEBUG_DRIVER("BO is not registered?\n");
757                 return -EFAULT;
758         }
759
760         /* Another get_user_pages is running at the same time?? */
761         if (WARN_ON(gtt->range))
762                 return -EFAULT;
763
764         if (!mmget_not_zero(mm)) /* Happens during process shutdown */
765                 return -ESRCH;
766
767         range = kzalloc(sizeof(*range), GFP_KERNEL);
768         if (unlikely(!range)) {
769                 r = -ENOMEM;
770                 goto out;
771         }
772         range->notifier = &bo->notifier;
773         range->start = bo->notifier.interval_tree.start;
774         range->end = bo->notifier.interval_tree.last + 1;
775         range->default_flags = HMM_PFN_REQ_FAULT;
776         if (!amdgpu_ttm_tt_is_readonly(ttm))
777                 range->default_flags |= HMM_PFN_REQ_WRITE;
778
779         range->hmm_pfns = kvmalloc_array(ttm->num_pages,
780                                          sizeof(*range->hmm_pfns), GFP_KERNEL);
781         if (unlikely(!range->hmm_pfns)) {
782                 r = -ENOMEM;
783                 goto out_free_ranges;
784         }
785
786         mmap_read_lock(mm);
787         vma = find_vma(mm, start);
788         if (unlikely(!vma || start < vma->vm_start)) {
789                 r = -EFAULT;
790                 goto out_unlock;
791         }
792         if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
793                 vma->vm_file)) {
794                 r = -EPERM;
795                 goto out_unlock;
796         }
797         mmap_read_unlock(mm);
798         timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
799
800 retry:
801         range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
802
803         mmap_read_lock(mm);
804         r = hmm_range_fault(range);
805         mmap_read_unlock(mm);
806         if (unlikely(r)) {
807                 /*
808                  * FIXME: This timeout should encompass the retry from
809                  * mmu_interval_read_retry() as well.
810                  */
811                 if (r == -EBUSY && !time_after(jiffies, timeout))
812                         goto retry;
813                 goto out_free_pfns;
814         }
815
816         /*
817          * Due to default_flags, all pages are HMM_PFN_VALID or
818          * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
819          * the notifier_lock, and mmu_interval_read_retry() must be done first.
820          */
821         for (i = 0; i < ttm->num_pages; i++)
822                 pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
823
824         gtt->range = range;
825         mmput(mm);
826
827         return 0;
828
829 out_unlock:
830         mmap_read_unlock(mm);
831 out_free_pfns:
832         kvfree(range->hmm_pfns);
833 out_free_ranges:
834         kfree(range);
835 out:
836         mmput(mm);
837         return r;
838 }
839
840 /**
841  * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
842  * and check if the pages backing this ttm range have been invalidated
843  *
844  * Returns: true if pages are still valid
845  */
846 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
847 {
848         struct amdgpu_ttm_tt *gtt = (void *)ttm;
849         bool r = false;
850
851         if (!gtt || !gtt->userptr)
852                 return false;
853
854         DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
855                 gtt->userptr, ttm->num_pages);
856
857         WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
858                 "No user pages to check\n");
859
860         if (gtt->range) {
861                 /*
862                  * FIXME: Must always hold notifier_lock for this, and must
863                  * not ignore the return code.
864                  */
865                 r = mmu_interval_read_retry(gtt->range->notifier,
866                                          gtt->range->notifier_seq);
867                 kvfree(gtt->range->hmm_pfns);
868                 kfree(gtt->range);
869                 gtt->range = NULL;
870         }
871
872         return !r;
873 }
874 #endif
875
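/*
 * Usage sketch (illustrative only, assumes CONFIG_DRM_AMDGPU_USERPTR): the two
 * helpers above are meant to be used as a pair around any access to the user
 * pages:
 *
 *   r = amdgpu_ttm_tt_get_user_pages(abo, pages);   // fault pages, start HMM tracking
 *   if (r)
 *           return r;
 *   ...                                             // consume the page array
 *   if (!amdgpu_ttm_tt_get_user_pages_done(ttm))    // stop tracking, check validity
 *           goto retry;                             // pages were invalidated, start over
 *
 * amdgpu_ttm_tt_get_user_pages_done() must be called exactly once per
 * successful amdgpu_ttm_tt_get_user_pages() call.
 */
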
876 /**
877  * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
878  *
879  * Called by amdgpu_cs_list_validate(). This creates the page list
880  * that backs user memory and will ultimately be mapped into the device
881  * address space.
882  */
883 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
884 {
885         unsigned long i;
886
887         for (i = 0; i < ttm->num_pages; ++i)
888                 ttm->pages[i] = pages ? pages[i] : NULL;
889 }
890
891 /**
892  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
893  *
894  * Called by amdgpu_ttm_backend_bind()
895  **/
896 static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
897                                      struct ttm_tt *ttm)
898 {
899         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
900         struct amdgpu_ttm_tt *gtt = (void *)ttm;
901         int r;
902
903         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
904         enum dma_data_direction direction = write ?
905                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
906
907         /* Allocate an SG array and squash pages into it */
908         r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
909                                       ttm->num_pages << PAGE_SHIFT,
910                                       GFP_KERNEL);
911         if (r)
912                 goto release_sg;
913
914         /* Map SG to device */
915         r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
916         if (r)
917                 goto release_sg;
918
919         /* convert SG to linear array of pages and dma addresses */
920         drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
921                                        ttm->num_pages);
922
923         return 0;
924
925 release_sg:
926         kfree(ttm->sg);
927         ttm->sg = NULL;
928         return r;
929 }
930
931 /**
932  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
933  */
934 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
935                                         struct ttm_tt *ttm)
936 {
937         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
938         struct amdgpu_ttm_tt *gtt = (void *)ttm;
939
940         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
941         enum dma_data_direction direction = write ?
942                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
943
944         /* double check that we don't free the table twice */
945         if (!ttm->sg->sgl)
946                 return;
947
948         /* unmap the pages mapped to the device */
949         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
950         sg_free_table(ttm->sg);
951
952 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
953         if (gtt->range) {
954                 unsigned long i;
955
956                 for (i = 0; i < ttm->num_pages; i++) {
957                         if (ttm->pages[i] !=
958                             hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
959                                 break;
960                 }
961
962                 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
963         }
964 #endif
965 }
966
967 static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
968                                 struct ttm_buffer_object *tbo,
969                                 uint64_t flags)
970 {
971         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
972         struct ttm_tt *ttm = tbo->ttm;
973         struct amdgpu_ttm_tt *gtt = (void *)ttm;
974         int r;
975
976         if (amdgpu_bo_encrypted(abo))
977                 flags |= AMDGPU_PTE_TMZ;
978
979         if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
980                 uint64_t page_idx = 1;
981
982                 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
983                                 ttm->pages, gtt->ttm.dma_address, flags);
984                 if (r)
985                         goto gart_bind_fail;
986
987                 /* The memory type of the first page defaults to UC. Now
988                  * modify the memory type to NC from the second page of
989                  * the BO onward.
990                  */
991                 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
992                 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
993
994                 r = amdgpu_gart_bind(adev,
995                                 gtt->offset + (page_idx << PAGE_SHIFT),
996                                 ttm->num_pages - page_idx,
997                                 &ttm->pages[page_idx],
998                                 &(gtt->ttm.dma_address[page_idx]), flags);
999         } else {
1000                 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1001                                      ttm->pages, gtt->ttm.dma_address, flags);
1002         }
1003
1004 gart_bind_fail:
1005         if (r)
1006                 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
1007                           ttm->num_pages, gtt->offset);
1008
1009         return r;
1010 }
1011
1012 /**
1013  * amdgpu_ttm_backend_bind - Bind GTT memory
1014  *
1015  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1016  * This handles binding GTT memory to the device address space.
1017  */
1018 static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
1019                                    struct ttm_tt *ttm,
1020                                    struct ttm_resource *bo_mem)
1021 {
1022         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1023         struct amdgpu_ttm_tt *gtt = (void*)ttm;
1024         uint64_t flags;
1025         int r = 0;
1026
1027         if (!bo_mem)
1028                 return -EINVAL;
1029
1030         if (gtt->bound)
1031                 return 0;
1032
1033         if (gtt->userptr) {
1034                 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
1035                 if (r) {
1036                         DRM_ERROR("failed to pin userptr\n");
1037                         return r;
1038                 }
1039         }
1040         if (!ttm->num_pages) {
1041                 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
1042                      ttm->num_pages, bo_mem, ttm);
1043         }
1044
1045         if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1046             bo_mem->mem_type == AMDGPU_PL_GWS ||
1047             bo_mem->mem_type == AMDGPU_PL_OA)
1048                 return -EINVAL;
1049
1050         if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1051                 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1052                 return 0;
1053         }
1054
1055         /* compute PTE flags relevant to this BO memory */
1056         flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1057
1058         /* bind pages into GART page tables */
1059         gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1060         r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1061                 ttm->pages, gtt->ttm.dma_address, flags);
1062
1063         if (r)
1064                 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
1065                           ttm->num_pages, gtt->offset);
1066         gtt->bound = true;
1067         return r;
1068 }
1069
1070 /**
1071  * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
1072  * through AGP or GART aperture.
1073  *
1074  * If bo is accessible through AGP aperture, then use AGP aperture
1075  * to access bo; otherwise allocate logical space in GART aperture
1076  * and map bo to GART aperture.
1077  */
1078 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1079 {
1080         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1081         struct ttm_operation_ctx ctx = { false, false };
1082         struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1083         struct ttm_resource tmp;
1084         struct ttm_placement placement;
1085         struct ttm_place placements;
1086         uint64_t addr, flags;
1087         int r;
1088
1089         if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1090                 return 0;
1091
1092         addr = amdgpu_gmc_agp_addr(bo);
1093         if (addr != AMDGPU_BO_INVALID_OFFSET) {
1094                 bo->mem.start = addr >> PAGE_SHIFT;
1095         } else {
1096
1097                 /* allocate GART space */
1098                 tmp = bo->mem;
1099                 tmp.mm_node = NULL;
1100                 placement.num_placement = 1;
1101                 placement.placement = &placements;
1102                 placement.num_busy_placement = 1;
1103                 placement.busy_placement = &placements;
1104                 placements.fpfn = 0;
1105                 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1106                 placements.mem_type = TTM_PL_TT;
1107                 placements.flags = bo->mem.placement;
1108
1109                 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1110                 if (unlikely(r))
1111                         return r;
1112
1113                 /* compute PTE flags for this buffer object */
1114                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1115
1116                 /* Bind pages */
1117                 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1118                 r = amdgpu_ttm_gart_bind(adev, bo, flags);
1119                 if (unlikely(r)) {
1120                         ttm_resource_free(bo, &tmp);
1121                         return r;
1122                 }
1123
1124                 ttm_resource_free(bo, &bo->mem);
1125                 bo->mem = tmp;
1126         }
1127
1128         return 0;
1129 }
1130
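/*
 * Usage sketch (illustrative only): callers that need a GPU-visible address
 * for a BO typically do
 *
 *   r = amdgpu_ttm_alloc_gart(&abo->tbo);
 *   if (r)
 *           return r;
 *   gpu_addr = amdgpu_bo_gpu_offset(abo);
 *
 * If the BO is reachable through the AGP aperture no GART space is consumed;
 * otherwise a GART range below gart_size is allocated and bound here.
 */
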
1131 /**
1132  * amdgpu_ttm_recover_gart - Rebind GTT pages
1133  *
1134  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1135  * rebind GTT pages during a GPU reset.
1136  */
1137 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1138 {
1139         struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1140         uint64_t flags;
1141         int r;
1142
1143         if (!tbo->ttm)
1144                 return 0;
1145
1146         flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1147         r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1148
1149         return r;
1150 }
1151
1152 /**
1153  * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1154  *
1155  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1156  * ttm_tt_destroy().
1157  */
1158 static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
1159                                       struct ttm_tt *ttm)
1160 {
1161         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1162         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1163         int r;
1164
1165         if (!gtt->bound)
1166                 return;
1167
1168         /* if the pages have userptr pinning then clear that first */
1169         if (gtt->userptr)
1170                 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1171
1172         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1173                 return;
1174
1175         /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1176         r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1177         if (r)
1178                 DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
1179                           gtt->ttm.num_pages, gtt->offset);
1180         gtt->bound = false;
1181 }
1182
1183 static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
1184                                        struct ttm_tt *ttm)
1185 {
1186         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1187
1188         amdgpu_ttm_backend_unbind(bdev, ttm);
1189         ttm_tt_destroy_common(bdev, ttm);
1190         if (gtt->usertask)
1191                 put_task_struct(gtt->usertask);
1192
1193         ttm_tt_fini(&gtt->ttm);
1194         kfree(gtt);
1195 }
1196
1197 /**
1198  * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1199  *
1200  * @bo: The buffer object to create a GTT ttm_tt object around
1201  *
1202  * Called by ttm_tt_create().
1203  */
1204 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1205                                            uint32_t page_flags)
1206 {
1207         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1208         struct amdgpu_ttm_tt *gtt;
1209         enum ttm_caching caching;
1210
1211         gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1212         if (gtt == NULL) {
1213                 return NULL;
1214         }
1215         gtt->gobj = &bo->base;
1216
1217         if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1218                 caching = ttm_write_combined;
1219         else
1220                 caching = ttm_cached;
1221
1222         /* allocate space for the uninitialized page entries */
1223         if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1224                 kfree(gtt);
1225                 return NULL;
1226         }
1227         return &gtt->ttm;
1228 }
1229
1230 /**
1231  * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1232  *
1233  * Map the pages of a ttm_tt object to an address space visible
1234  * to the underlying device.
1235  */
1236 static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
1237                                   struct ttm_tt *ttm,
1238                                   struct ttm_operation_ctx *ctx)
1239 {
1240         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1241         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1242
1243         /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1244         if (gtt && gtt->userptr) {
1245                 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1246                 if (!ttm->sg)
1247                         return -ENOMEM;
1248
1249                 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1250                 return 0;
1251         }
1252
1253         if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
1254                 if (!ttm->sg) {
1255                         struct dma_buf_attachment *attach;
1256                         struct sg_table *sgt;
1257
1258                         attach = gtt->gobj->import_attach;
1259                         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1260                         if (IS_ERR(sgt))
1261                                 return PTR_ERR(sgt);
1262
1263                         ttm->sg = sgt;
1264                 }
1265
1266                 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
1267                                                ttm->num_pages);
1268                 return 0;
1269         }
1270
1271         return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1272 }
1273
1274 /**
1275  * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1276  *
1277  * Unmaps pages of a ttm_tt object from the device address space and
1278  * unpopulates the page array backing it.
1279  */
1280 static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
1281                                      struct ttm_tt *ttm)
1282 {
1283         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1284         struct amdgpu_device *adev;
1285
1286         if (gtt && gtt->userptr) {
1287                 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1288                 kfree(ttm->sg);
1289                 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1290                 return;
1291         }
1292
1293         if (ttm->sg && gtt->gobj->import_attach) {
1294                 struct dma_buf_attachment *attach;
1295
1296                 attach = gtt->gobj->import_attach;
1297                 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1298                 ttm->sg = NULL;
1299                 return;
1300         }
1301
1302         if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1303                 return;
1304
1305         adev = amdgpu_ttm_adev(bdev);
1306         return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1307 }
1308
1309 /**
1310  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1311  * task
1312  *
1313  * @bo: The ttm_buffer_object to bind this userptr to
1314  * @addr:  The address in the current tasks VM space to use
1315  * @flags: Requirements of userptr object.
1316  *
1317  * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1318  * to current task
1319  */
1320 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1321                               uint64_t addr, uint32_t flags)
1322 {
1323         struct amdgpu_ttm_tt *gtt;
1324
1325         if (!bo->ttm) {
1326                 /* TODO: We want a separate TTM object type for userptrs */
1327                 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1328                 if (bo->ttm == NULL)
1329                         return -ENOMEM;
1330         }
1331
1332         gtt = (void *)bo->ttm;
1333         gtt->userptr = addr;
1334         gtt->userflags = flags;
1335
1336         if (gtt->usertask)
1337                 put_task_struct(gtt->usertask);
1338         gtt->usertask = current->group_leader;
1339         get_task_struct(gtt->usertask);
1340
1341         return 0;
1342 }
1343
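/*
 * Usage sketch (illustrative only, variable names are hypothetical): this is
 * roughly what amdgpu_gem_userptr_ioctl() does when wiring a GEM BO to user
 * memory:
 *
 *   r = amdgpu_ttm_tt_set_userptr(&abo->tbo, args->addr, args->flags);
 *   if (r)
 *           return r;
 *
 * Afterwards the BO's ttm_tt reports its owning mm via
 * amdgpu_ttm_tt_get_usermm(), and the user pages are pinned lazily in
 * amdgpu_ttm_backend_bind() via amdgpu_ttm_tt_pin_userptr().
 */
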
1344 /**
1345  * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1346  */
1347 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1348 {
1349         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1350
1351         if (gtt == NULL)
1352                 return NULL;
1353
1354         if (gtt->usertask == NULL)
1355                 return NULL;
1356
1357         return gtt->usertask->mm;
1358 }
1359
1360 /**
1361  * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1362  * address range for the current task.
1363  *
1364  */
1365 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1366                                   unsigned long end)
1367 {
1368         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1369         unsigned long size;
1370
1371         if (gtt == NULL || !gtt->userptr)
1372                 return false;
1373
1374         /* Return false if no part of the ttm_tt object lies within
1375          * the range
1376          */
1377         size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1378         if (gtt->userptr > end || gtt->userptr + size <= start)
1379                 return false;
1380
1381         return true;
1382 }
1383
1384 /**
1385  * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1386  */
1387 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1388 {
1389         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1390
1391         if (gtt == NULL || !gtt->userptr)
1392                 return false;
1393
1394         return true;
1395 }
1396
1397 /**
1398  * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1399  */
1400 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1401 {
1402         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1403
1404         if (gtt == NULL)
1405                 return false;
1406
1407         return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1408 }
1409
1410 /**
1411  * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1412  *
1413  * @ttm: The ttm_tt object to compute the flags for
1414  * @mem: The memory registry backing this ttm_tt object
1415  *
1416  * Figure out the flags to use for a VM PDE (Page Directory Entry).
1417  */
1418 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1419 {
1420         uint64_t flags = 0;
1421
1422         if (mem && mem->mem_type != TTM_PL_SYSTEM)
1423                 flags |= AMDGPU_PTE_VALID;
1424
1425         if (mem && mem->mem_type == TTM_PL_TT) {
1426                 flags |= AMDGPU_PTE_SYSTEM;
1427
1428                 if (ttm->caching == ttm_cached)
1429                         flags |= AMDGPU_PTE_SNOOPED;
1430         }
1431
1432         return flags;
1433 }
1434
1435 /**
1436  * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1437  *
1438  * @ttm: The ttm_tt object to compute the flags for
1439  * @mem: The memory registry backing this ttm_tt object
1440  *
1441  * Figure out the flags to use for a VM PTE (Page Table Entry).
1442  */
1443 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1444                                  struct ttm_resource *mem)
1445 {
1446         uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1447
1448         flags |= adev->gart.gart_pte_flags;
1449         flags |= AMDGPU_PTE_READABLE;
1450
1451         if (!amdgpu_ttm_tt_is_readonly(ttm))
1452                 flags |= AMDGPU_PTE_WRITEABLE;
1453
1454         return flags;
1455 }
1456
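/*
 * Illustrative note (not part of the driver): for a cached GTT placement the
 * two helpers above compose the PTE bits roughly as
 *
 *   flags  = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;  // pde_flags()
 *   flags |= adev->gart.gart_pte_flags | AMDGPU_PTE_READABLE;            // pte_flags()
 *   if (!amdgpu_ttm_tt_is_readonly(ttm))
 *           flags |= AMDGPU_PTE_WRITEABLE;
 *
 * VRAM placements only get AMDGPU_PTE_VALID from pde_flags(), and system
 * placements get neither VALID nor SYSTEM.
 */
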
1457 /**
1458  * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1459  * object.
1460  *
1461  * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1462  * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1463  * it can find space for a new object and by ttm_bo_force_list_clean() which is
1464  * used to clean out a memory space.
1465  */
1466 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1467                                             const struct ttm_place *place)
1468 {
1469         unsigned long num_pages = bo->mem.num_pages;
1470         struct drm_mm_node *node = bo->mem.mm_node;
1471         struct dma_resv_list *flist;
1472         struct dma_fence *f;
1473         int i;
1474
1475         if (bo->type == ttm_bo_type_kernel &&
1476             !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1477                 return false;
1478
1479         /* If bo is a KFD BO, check if the bo belongs to the current process.
1480          * If true, then return false as any KFD process needs all its BOs to
1481          * be resident to run successfully
1482          */
1483         flist = dma_resv_get_list(bo->base.resv);
1484         if (flist) {
1485                 for (i = 0; i < flist->shared_count; ++i) {
1486                         f = rcu_dereference_protected(flist->shared[i],
1487                                 dma_resv_held(bo->base.resv));
1488                         if (amdkfd_fence_check_mm(f, current->mm))
1489                                 return false;
1490                 }
1491         }
1492
1493         switch (bo->mem.mem_type) {
1494         case TTM_PL_TT:
1495                 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1496                     amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1497                         return false;
1498                 return true;
1499
1500         case TTM_PL_VRAM:
1501                 /* Check each drm MM node individually */
1502                 while (num_pages) {
1503                         if (place->fpfn < (node->start + node->size) &&
1504                             !(place->lpfn && place->lpfn <= node->start))
1505                                 return true;
1506
1507                         num_pages -= node->size;
1508                         ++node;
1509                 }
1510                 return false;
1511
1512         default:
1513                 break;
1514         }
1515
1516         return ttm_bo_eviction_valuable(bo, place);
1517 }
1518
1519 /**
1520  * amdgpu_ttm_access_memory - Read or write memory that backs a buffer object.
1521  *
1522  * @bo:  The buffer object to read/write
1523  * @offset:  Offset into buffer object
1524  * @buf:  Secondary buffer to write/read from
1525  * @len: Length in bytes of access
1526  * @write:  true if writing
1527  *
1528  * This is used to access VRAM that backs a buffer object via MMIO
1529  * access for debugging purposes.
1530  */
1531 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1532                                     unsigned long offset,
1533                                     void *buf, int len, int write)
1534 {
1535         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1536         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1537         struct drm_mm_node *nodes;
1538         uint32_t value = 0;
1539         int ret = 0;
1540         uint64_t pos;
1541         unsigned long flags;
1542
1543         if (bo->mem.mem_type != TTM_PL_VRAM)
1544                 return -EIO;
1545
1546         pos = offset;
1547         nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
1548         pos += (nodes->start << PAGE_SHIFT);
1549
1550         while (len && pos < adev->gmc.mc_vram_size) {
1551                 uint64_t aligned_pos = pos & ~(uint64_t)3;
1552                 uint64_t bytes = 4 - (pos & 3);
1553                 uint32_t shift = (pos & 3) * 8;
1554                 uint32_t mask = 0xffffffff << shift;
1555
1556                 if (len < bytes) {
1557                         mask &= 0xffffffff >> (bytes - len) * 8;
1558                         bytes = len;
1559                 }
1560
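                /*
                 * A partial dword cannot go through the bulk VRAM access
                 * helper below, so fall back to indirect MMIO through the
                 * MM_INDEX/MM_DATA registers and read-modify-write the
                 * containing aligned dword.
                 */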
1561                 if (mask != 0xffffffff) {
1562                         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1563                         WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1564                         WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1565                         if (!write || mask != 0xffffffff)
1566                                 value = RREG32_NO_KIQ(mmMM_DATA);
1567                         if (write) {
1568                                 value &= ~mask;
1569                                 value |= (*(uint32_t *)buf << shift) & mask;
1570                                 WREG32_NO_KIQ(mmMM_DATA, value);
1571                         }
1572                         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1573                         if (!write) {
1574                                 value = (value & mask) >> shift;
1575                                 memcpy(buf, &value, bytes);
1576                         }
1577                 } else {
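                        /*
                         * Aligned bulk path: clamp the transfer to the end of
                         * the current drm_mm node and to a dword-aligned
                         * length, then use the VRAM access helper.
                         */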
1578                         bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
1579                         bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
1580
1581                         amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
1582                                                   bytes, write);
1583                 }
1584
1585                 ret += bytes;
1586                 buf = (uint8_t *)buf + bytes;
1587                 pos += bytes;
1588                 len -= bytes;
1589                 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1590                         ++nodes;
1591                         pos = (nodes->start << PAGE_SHIFT);
1592                 }
1593         }
1594
1595         return ret;
1596 }
1597
1598 static void
1599 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1600 {
1601         amdgpu_bo_move_notify(bo, false, NULL);
1602 }
1603
1604 static struct ttm_bo_driver amdgpu_bo_driver = {
1605         .ttm_tt_create = &amdgpu_ttm_tt_create,
1606         .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1607         .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1608         .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1609         .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1610         .evict_flags = &amdgpu_evict_flags,
1611         .move = &amdgpu_bo_move,
1612         .verify_access = &amdgpu_verify_access,
1613         .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1614         .release_notify = &amdgpu_bo_release_notify,
1615         .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1616         .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1617         .access_memory = &amdgpu_ttm_access_memory,
1618         .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1619 };
1620
1621 /*
1622  * Firmware Reservation functions
1623  */
1624 /**
1625  * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1626  *
1627  * @adev: amdgpu_device pointer
1628  *
1629  * free fw reserved vram if it has been reserved.
1630  */
1631 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1632 {
1633         amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1634                 NULL, &adev->mman.fw_vram_usage_va);
1635 }
1636
1637 /**
1638  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1639  *
1640  * @adev: amdgpu_device pointer
1641  *
1642  * create bo vram reservation from fw.
1643  */
1644 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1645 {
1646         uint64_t vram_size = adev->gmc.visible_vram_size;
1647
1648         adev->mman.fw_vram_usage_va = NULL;
1649         adev->mman.fw_vram_usage_reserved_bo = NULL;
1650
1651         if (adev->mman.fw_vram_usage_size == 0 ||
1652             adev->mman.fw_vram_usage_size > vram_size)
1653                 return 0;
1654
1655         return amdgpu_bo_create_kernel_at(adev,
1656                                           adev->mman.fw_vram_usage_start_offset,
1657                                           adev->mman.fw_vram_usage_size,
1658                                           AMDGPU_GEM_DOMAIN_VRAM,
1659                                           &adev->mman.fw_vram_usage_reserved_bo,
1660                                           &adev->mman.fw_vram_usage_va);
1661 }
1662
1663 /*
1664  * Memory training reservation functions
1665  */
1666
1667 /**
1668  * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1669  *
1670  * @adev: amdgpu_device pointer
1671  *
1672  * free memory training reserved vram if it has been reserved.
1673  */
1674 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1675 {
1676         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1677
1678         ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1679         amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1680         ctx->c2p_bo = NULL;
1681
1682         return 0;
1683 }
1684
1685 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1686 {
1687         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1688
1689         memset(ctx, 0, sizeof(*ctx));
1690
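        /*
         * Both training buffers live near the top of VRAM: the p2c data sits
         * a fixed GDDR6_MEM_TRAINING_OFFSET below the end of VRAM, while the
         * c2p buffer is placed just under the IP discovery TMR on a 1MB
         * aligned boundary.
         */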
1691         ctx->c2p_train_data_offset =
1692                 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1693         ctx->p2c_train_data_offset =
1694                 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1695         ctx->train_data_size =
1696                 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1697
1698         DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1699                         ctx->train_data_size,
1700                         ctx->p2c_train_data_offset,
1701                         ctx->c2p_train_data_offset);
1702 }
1703
1704 /*
1705  * reserve TMR memory at the top of VRAM which holds
1706  * IP Discovery data and is protected by PSP.
1707  */
1708 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1709 {
1710         int ret;
1711         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1712         bool mem_train_support = false;
1713
1714         if (!amdgpu_sriov_vf(adev)) {
1715                 ret = amdgpu_mem_train_support(adev);
1716                 if (ret == 1)
1717                         mem_train_support = true;
1718                 else if (ret == -1)
1719                         return -EINVAL;
1720                 else
1721                         DRM_DEBUG("memory training is not supported!\n");
1722         }
1723
1724         /*
1725          * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards
1726          * for all the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1727          *
1728          * Otherwise, fall back to the legacy approach of checking and reserving a TMR block for IP
1729          * discovery data and G6 memory training data respectively.
1730          */
1731         adev->mman.discovery_tmr_size =
1732                 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1733         if (!adev->mman.discovery_tmr_size)
1734                 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1735
1736         if (mem_train_support) {
1737                 /* reserve vram for mem train according to TMR location */
1738                 amdgpu_ttm_training_data_block_init(adev);
1739                 ret = amdgpu_bo_create_kernel_at(adev,
1740                                          ctx->c2p_train_data_offset,
1741                                          ctx->train_data_size,
1742                                          AMDGPU_GEM_DOMAIN_VRAM,
1743                                          &ctx->c2p_bo,
1744                                          NULL);
1745                 if (ret) {
1746                         DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1747                         amdgpu_ttm_training_reserve_vram_fini(adev);
1748                         return ret;
1749                 }
1750                 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1751         }
1752
1753         ret = amdgpu_bo_create_kernel_at(adev,
1754                                 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1755                                 adev->mman.discovery_tmr_size,
1756                                 AMDGPU_GEM_DOMAIN_VRAM,
1757                                 &adev->mman.discovery_memory,
1758                                 NULL);
1759         if (ret) {
1760                 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1761                 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1762                 return ret;
1763         }
1764
1765         return 0;
1766 }
1767
1768 /**
1769  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1770  * gtt/vram related fields.
1771  *
1772  * This initializes all of the memory space pools that the TTM layer
1773  * will need such as the GTT space (system memory mapped to the device),
1774  * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1775  * can be mapped per VMID.
1776  */
1777 int amdgpu_ttm_init(struct amdgpu_device *adev)
1778 {
1779         uint64_t gtt_size;
1780         int r;
1781         u64 vis_vram_limit;
1782
1783         mutex_init(&adev->mman.gtt_window_lock);
1784
1785         /* No other users of the address space, so set it to 0 */
1786         r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1787                                adev_to_drm(adev)->anon_inode->i_mapping,
1788                                adev_to_drm(adev)->vma_offset_manager,
1789                                adev->need_swiotlb,
1790                                dma_addressing_limited(adev->dev));
1791         if (r) {
1792                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1793                 return r;
1794         }
1795         adev->mman.initialized = true;
1796
1797         /* Initialize VRAM pool with all of VRAM divided into pages */
1798         r = amdgpu_vram_mgr_init(adev);
1799         if (r) {
1800                 DRM_ERROR("Failed initializing VRAM heap.\n");
1801                 return r;
1802         }
1803
1804         /* Reduce size of CPU-visible VRAM if requested */
1805         vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1806         if (amdgpu_vis_vram_limit > 0 &&
1807             vis_vram_limit <= adev->gmc.visible_vram_size)
1808                 adev->gmc.visible_vram_size = vis_vram_limit;
1809
1810         /* Change the size here instead of the init above so only lpfn is affected */
1811         amdgpu_ttm_set_buffer_funcs_status(adev, false);
1812 #ifdef CONFIG_64BIT
1813         adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1814                                                 adev->gmc.visible_vram_size);
1815 #endif
1816
1817         /*
1818          * The reserved vram for firmware must be pinned to the specified
1819          * place on the VRAM, so reserve it early.
1820          */
1821         r = amdgpu_ttm_fw_reserve_vram_init(adev);
1822         if (r) {
1823                 return r;
1824         }
1825
1826         /*
1827          * Only NAVI10 and onwards ASICs support IP discovery.
1828          * If IP discovery is enabled, a block of memory should be
1829          * reserved for IP discovery.
1830          */
1831         if (adev->mman.discovery_bin) {
1832                 r = amdgpu_ttm_reserve_tmr(adev);
1833                 if (r)
1834                         return r;
1835         }
1836
1837         /* allocate memory as required for VGA
1838          * This is used for VGA emulation and pre-OS scanout buffers to
1839          * avoid display artifacts while transitioning between pre-OS
1840          * and driver.  */
1841         r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1842                                        AMDGPU_GEM_DOMAIN_VRAM,
1843                                        &adev->mman.stolen_vga_memory,
1844                                        NULL);
1845         if (r)
1846                 return r;
1847         r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1848                                        adev->mman.stolen_extended_size,
1849                                        AMDGPU_GEM_DOMAIN_VRAM,
1850                                        &adev->mman.stolen_extended_memory,
1851                                        NULL);
1852         if (r)
1853                 return r;
1854
1855         DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1856                  (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1857
1858         /* Compute GTT size, either based on 3/4 of the size of RAM
1859          * or whatever the user passed on module init */
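        /*
         * For example (hypothetical numbers): with 16 GiB of system RAM and
         * 8 GiB of VRAM, and assuming the built-in default is smaller than
         * the VRAM size, this picks min(max(default, 8 GiB), 12 GiB), i.e.
         * 8 GiB of GTT.
         */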
1860         if (amdgpu_gtt_size == -1) {
1861                 struct sysinfo si;
1862
1863                 si_meminfo(&si);
1864                 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1865                                adev->gmc.mc_vram_size),
1866                                ((uint64_t)si.totalram * si.mem_unit * 3/4));
1867         } else {
1868                 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1869         }
1870
1871         /* Initialize GTT memory pool */
1872         r = amdgpu_gtt_mgr_init(adev, gtt_size);
1873         if (r) {
1874                 DRM_ERROR("Failed initializing GTT heap.\n");
1875                 return r;
1876         }
1877         DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1878                  (unsigned)(gtt_size / (1024 * 1024)));
1879
1880         /* Initialize various on-chip memory pools */
1881         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1882         if (r) {
1883                 DRM_ERROR("Failed initializing GDS heap.\n");
1884                 return r;
1885         }
1886
1887         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1888         if (r) {
1889                 DRM_ERROR("Failed initializing gws heap.\n");
1890                 return r;
1891         }
1892
1893         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1894         if (r) {
1895                 DRM_ERROR("Failed initializing oa heap.\n");
1896                 return r;
1897         }
1898
1899         return 0;
1900 }
1901
1902 /**
1903  * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
1904  */
1905 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1906 {
1907         /* return the VGA stolen memory (if any) back to VRAM */
1908         if (!adev->mman.keep_stolen_vga_memory)
1909                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1910         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1911 }
1912
1913 /**
1914  * amdgpu_ttm_fini - De-initialize the TTM memory pools
1915  */
1916 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1917 {
1918         if (!adev->mman.initialized)
1919                 return;
1920
1921         amdgpu_ttm_training_reserve_vram_fini(adev);
1922         /* return the stolen vga memory back to VRAM */
1923         if (adev->mman.keep_stolen_vga_memory)
1924                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1925         /* return the IP Discovery TMR memory back to VRAM */
1926         amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1927         amdgpu_ttm_fw_reserve_vram_fini(adev);
1928
1929         if (adev->mman.aper_base_kaddr)
1930                 iounmap(adev->mman.aper_base_kaddr);
1931         adev->mman.aper_base_kaddr = NULL;
1932
1933         amdgpu_vram_mgr_fini(adev);
1934         amdgpu_gtt_mgr_fini(adev);
1935         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1936         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1937         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1938         ttm_bo_device_release(&adev->mman.bdev);
1939         adev->mman.initialized = false;
1940         DRM_INFO("amdgpu: ttm finalized\n");
1941 }
1942
1943 /**
1944  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1945  *
1946  * @adev: amdgpu_device pointer
1947  * @enable: true when we can use buffer functions.
1948  *
1949  * Enable/disable use of buffer functions during suspend/resume. This should
1950  * only be called at bootup or when userspace isn't running.
1951  */
1952 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1953 {
1954         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1955         uint64_t size;
1956         int r;
1957
1958         if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1959             adev->mman.buffer_funcs_enabled == enable)
1960                 return;
1961
1962         if (enable) {
1963                 struct amdgpu_ring *ring;
1964                 struct drm_gpu_scheduler *sched;
1965
1966                 ring = adev->mman.buffer_funcs_ring;
1967                 sched = &ring->sched;
1968                 r = drm_sched_entity_init(&adev->mman.entity,
1969                                           DRM_SCHED_PRIORITY_KERNEL, &sched,
1970                                           1, NULL);
1971                 if (r) {
1972                         DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1973                                   r);
1974                         return;
1975                 }
1976         } else {
1977                 drm_sched_entity_destroy(&adev->mman.entity);
1978                 dma_fence_put(man->move);
1979                 man->move = NULL;
1980         }
1981
1982         /* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1983         if (enable)
1984                 size = adev->gmc.real_vram_size;
1985         else
1986                 size = adev->gmc.visible_vram_size;
1987         man->size = size >> PAGE_SHIFT;
1988         adev->mman.buffer_funcs_enabled = enable;
1989 }
1990
1991 static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
1992 {
1993         struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
1994         vm_fault_t ret;
1995
1996         ret = ttm_bo_vm_reserve(bo, vmf);
1997         if (ret)
1998                 return ret;
1999
2000         ret = amdgpu_bo_fault_reserve_notify(bo);
2001         if (ret)
2002                 goto unlock;
2003
2004         ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
2005                                        TTM_BO_VM_NUM_PREFAULT, 1);
2006         if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
2007                 return ret;
2008
2009 unlock:
2010         dma_resv_unlock(bo->base.resv);
2011         return ret;
2012 }
2013
2014 static struct vm_operations_struct amdgpu_ttm_vm_ops = {
2015         .fault = amdgpu_ttm_fault,
2016         .open = ttm_bo_vm_open,
2017         .close = ttm_bo_vm_close,
2018         .access = ttm_bo_vm_access
2019 };
2020
2021 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
2022 {
2023         struct drm_file *file_priv = filp->private_data;
2024         struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
2025         int r;
2026
2027         r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
2028         if (unlikely(r != 0))
2029                 return r;
2030
2031         vma->vm_ops = &amdgpu_ttm_vm_ops;
2032         return 0;
2033 }
2034
2035 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2036                        uint64_t dst_offset, uint32_t byte_count,
2037                        struct dma_resv *resv,
2038                        struct dma_fence **fence, bool direct_submit,
2039                        bool vm_needs_flush, bool tmz)
2040 {
2041         enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
2042                 AMDGPU_IB_POOL_DELAYED;
2043         struct amdgpu_device *adev = ring->adev;
2044         struct amdgpu_job *job;
2045
2046         uint32_t max_bytes;
2047         unsigned num_loops, num_dw;
2048         unsigned i;
2049         int r;
2050
2051         if (direct_submit && !ring->sched.ready) {
2052                 DRM_ERROR("Trying to move memory with ring turned off.\n");
2053                 return -EINVAL;
2054         }
2055
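        /*
         * Size the IB for one copy packet per loop: each packet moves at
         * most copy_max_bytes and costs copy_num_dw dwords, and the total is
         * rounded up to a multiple of 8 dwords for padding.
         */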
2056         max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2057         num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2058         num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2059
2060         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
2061         if (r)
2062                 return r;
2063
2064         if (vm_needs_flush) {
2065                 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
2066                 job->vm_needs_flush = true;
2067         }
2068         if (resv) {
2069                 r = amdgpu_sync_resv(adev, &job->sync, resv,
2070                                      AMDGPU_SYNC_ALWAYS,
2071                                      AMDGPU_FENCE_OWNER_UNDEFINED);
2072                 if (r) {
2073                         DRM_ERROR("sync failed (%d).\n", r);
2074                         goto error_free;
2075                 }
2076         }
2077
2078         for (i = 0; i < num_loops; i++) {
2079                 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2080
2081                 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2082                                         dst_offset, cur_size_in_bytes, tmz);
2083
2084                 src_offset += cur_size_in_bytes;
2085                 dst_offset += cur_size_in_bytes;
2086                 byte_count -= cur_size_in_bytes;
2087         }
2088
2089         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2090         WARN_ON(job->ibs[0].length_dw > num_dw);
2091         if (direct_submit)
2092                 r = amdgpu_job_submit_direct(job, ring, fence);
2093         else
2094                 r = amdgpu_job_submit(job, &adev->mman.entity,
2095                                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2096         if (r)
2097                 goto error_free;
2098
2099         return r;
2100
2101 error_free:
2102         amdgpu_job_free(job);
2103         DRM_ERROR("Error scheduling IBs (%d)\n", r);
2104         return r;
2105 }
2106
2107 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2108                        uint32_t src_data,
2109                        struct dma_resv *resv,
2110                        struct dma_fence **fence)
2111 {
2112         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2113         uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2114         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2115
2116         struct drm_mm_node *mm_node;
2117         unsigned long num_pages;
2118         unsigned int num_loops, num_dw;
2119
2120         struct amdgpu_job *job;
2121         int r;
2122
2123         if (!adev->mman.buffer_funcs_enabled) {
2124                 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2125                 return -EINVAL;
2126         }
2127
2128         if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2129                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
2130                 if (r)
2131                         return r;
2132         }
2133
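        /*
         * The BO may be backed by several scattered drm_mm nodes; walk them
         * once to count how many fill packets of at most fill_max_bytes each
         * the IB needs to hold.
         */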
2134         num_pages = bo->tbo.mem.num_pages;
2135         mm_node = bo->tbo.mem.mm_node;
2136         num_loops = 0;
2137         while (num_pages) {
2138                 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2139
2140                 num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
2141                 num_pages -= mm_node->size;
2142                 ++mm_node;
2143         }
2144         num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2145
2146         /* for IB padding */
2147         num_dw += 64;
2148
2149         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
2150                                      &job);
2151         if (r)
2152                 return r;
2153
2154         if (resv) {
2155                 r = amdgpu_sync_resv(adev, &job->sync, resv,
2156                                      AMDGPU_SYNC_ALWAYS,
2157                                      AMDGPU_FENCE_OWNER_UNDEFINED);
2158                 if (r) {
2159                         DRM_ERROR("sync failed (%d).\n", r);
2160                         goto error_free;
2161                 }
2162         }
2163
2164         num_pages = bo->tbo.mem.num_pages;
2165         mm_node = bo->tbo.mem.mm_node;
2166
2167         while (num_pages) {
2168                 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2169                 uint64_t dst_addr;
2170
2171                 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
2172                 while (byte_count) {
2173                         uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
2174                                                            max_bytes);
2175
2176                         amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2177                                                 dst_addr, cur_size_in_bytes);
2178
2179                         dst_addr += cur_size_in_bytes;
2180                         byte_count -= cur_size_in_bytes;
2181                 }
2182
2183                 num_pages -= mm_node->size;
2184                 ++mm_node;
2185         }
2186
2187         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2188         WARN_ON(job->ibs[0].length_dw > num_dw);
2189         r = amdgpu_job_submit(job, &adev->mman.entity,
2190                               AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2191         if (r)
2192                 goto error_free;
2193
2194         return 0;
2195
2196 error_free:
2197         amdgpu_job_free(job);
2198         return r;
2199 }
2200
2201 #if defined(CONFIG_DEBUG_FS)
2202
2203 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2204 {
2205         struct drm_info_node *node = (struct drm_info_node *)m->private;
2206         unsigned ttm_pl = (uintptr_t)node->info_ent->data;
2207         struct drm_device *dev = node->minor->dev;
2208         struct amdgpu_device *adev = drm_to_adev(dev);
2209         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
2210         struct drm_printer p = drm_seq_file_printer(m);
2211
2212         man->func->debug(man, &p);
2213         return 0;
2214 }
2215
2216 static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
2217 {
2218         struct drm_info_node *node = (struct drm_info_node *)m->private;
2219         struct drm_device *dev = node->minor->dev;
2220         struct amdgpu_device *adev = drm_to_adev(dev);
2221
2222         return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2223 }
2224
2225 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
2226         {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
2227         {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
2228         {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
2229         {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
2230         {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
2231         {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
2232 };
2233
2234 /**
2235  * amdgpu_ttm_vram_read - Linear read access to VRAM
2236  *
2237  * Accesses VRAM via MMIO for debugging purposes.
2238  */
2239 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2240                                     size_t size, loff_t *pos)
2241 {
2242         struct amdgpu_device *adev = file_inode(f)->i_private;
2243         ssize_t result = 0;
2244
2245         if (size & 0x3 || *pos & 0x3)
2246                 return -EINVAL;
2247
2248         if (*pos >= adev->gmc.mc_vram_size)
2249                 return -ENXIO;
2250
2251         size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2252         while (size) {
2253                 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2254                 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2255
2256                 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2257                 if (copy_to_user(buf, value, bytes))
2258                         return -EFAULT;
2259
2260                 result += bytes;
2261                 buf += bytes;
2262                 *pos += bytes;
2263                 size -= bytes;
2264         }
2265
2266         return result;
2267 }
2268
2269 /**
2270  * amdgpu_ttm_vram_write - Linear write access to VRAM
2271  *
2272  * Accesses VRAM via MMIO for debugging purposes.
2273  */
2274 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2275                                     size_t size, loff_t *pos)
2276 {
2277         struct amdgpu_device *adev = file_inode(f)->i_private;
2278         ssize_t result = 0;
2279         int r;
2280
2281         if (size & 0x3 || *pos & 0x3)
2282                 return -EINVAL;
2283
2284         if (*pos >= adev->gmc.mc_vram_size)
2285                 return -ENXIO;
2286
2287         while (size) {
2288                 unsigned long flags;
2289                 uint32_t value;
2290
2291                 if (*pos >= adev->gmc.mc_vram_size)
2292                         return result;
2293
2294                 r = get_user(value, (uint32_t *)buf);
2295                 if (r)
2296                         return r;
2297
2298                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2299                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2300                 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2301                 WREG32_NO_KIQ(mmMM_DATA, value);
2302                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2303
2304                 result += 4;
2305                 buf += 4;
2306                 *pos += 4;
2307                 size -= 4;
2308         }
2309
2310         return result;
2311 }
2312
2313 static const struct file_operations amdgpu_ttm_vram_fops = {
2314         .owner = THIS_MODULE,
2315         .read = amdgpu_ttm_vram_read,
2316         .write = amdgpu_ttm_vram_write,
2317         .llseek = default_llseek,
2318 };
2319
2320 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2321
2322 /**
2323  * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2324  */
2325 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2326                                    size_t size, loff_t *pos)
2327 {
2328         struct amdgpu_device *adev = file_inode(f)->i_private;
2329         ssize_t result = 0;
2330         int r;
2331
2332         while (size) {
2333                 loff_t p = *pos / PAGE_SIZE;
2334                 unsigned off = *pos & ~PAGE_MASK;
2335                 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2336                 struct page *page;
2337                 void *ptr;
2338
2339                 if (p >= adev->gart.num_cpu_pages)
2340                         return result;
2341
2342                 page = adev->gart.pages[p];
2343                 if (page) {
2344                         ptr = kmap(page);
2345                         ptr += off;
2346
2347                         r = copy_to_user(buf, ptr, cur_size);
2348                         kunmap(adev->gart.pages[p]);
2349                 } else
2350                         r = clear_user(buf, cur_size);
2351
2352                 if (r)
2353                         return -EFAULT;
2354
2355                 result += cur_size;
2356                 buf += cur_size;
2357                 *pos += cur_size;
2358                 size -= cur_size;
2359         }
2360
2361         return result;
2362 }
2363
2364 static const struct file_operations amdgpu_ttm_gtt_fops = {
2365         .owner = THIS_MODULE,
2366         .read = amdgpu_ttm_gtt_read,
2367         .llseek = default_llseek
2368 };
2369
2370 #endif
2371
2372 /**
2373  * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2374  *
2375  * This function is used to read memory that has been mapped to the
2376  * GPU and the known addresses are not physical addresses but instead
2377  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2378  */
2379 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2380                                  size_t size, loff_t *pos)
2381 {
2382         struct amdgpu_device *adev = file_inode(f)->i_private;
2383         struct iommu_domain *dom;
2384         ssize_t result = 0;
2385         int r;
2386
2387         /* retrieve the IOMMU domain if any for this device */
2388         dom = iommu_get_domain_for_dev(adev->dev);
2389
2390         while (size) {
2391                 phys_addr_t addr = *pos & PAGE_MASK;
2392                 loff_t off = *pos & ~PAGE_MASK;
2393                 size_t bytes = PAGE_SIZE - off;
2394                 unsigned long pfn;
2395                 struct page *p;
2396                 void *ptr;
2397
2398                 bytes = bytes < size ? bytes : size;
2399
2400                 /* Translate the bus address to a physical address.  If
2401                  * the domain is NULL it means there is no IOMMU active
2402                  * and the address translation is the identity
2403                  */
2404                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2405
2406                 pfn = addr >> PAGE_SHIFT;
2407                 if (!pfn_valid(pfn))
2408                         return -EPERM;
2409
2410                 p = pfn_to_page(pfn);
2411                 if (p->mapping != adev->mman.bdev.dev_mapping)
2412                         return -EPERM;
2413
2414                 ptr = kmap(p);
2415                 r = copy_to_user(buf, ptr + off, bytes);
2416                 kunmap(p);
2417                 if (r)
2418                         return -EFAULT;
2419
2420                 size -= bytes;
2421                 *pos += bytes;
2422                 result += bytes;
2423         }
2424
2425         return result;
2426 }
2427
2428 /**
2429  * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2430  *
2431  * This function is used to write memory that has been mapped to the
2432  * GPU and the known addresses are not physical addresses but instead
2433  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2434  */
2435 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2436                                  size_t size, loff_t *pos)
2437 {
2438         struct amdgpu_device *adev = file_inode(f)->i_private;
2439         struct iommu_domain *dom;
2440         ssize_t result = 0;
2441         int r;
2442
2443         dom = iommu_get_domain_for_dev(adev->dev);
2444
2445         while (size) {
2446                 phys_addr_t addr = *pos & PAGE_MASK;
2447                 loff_t off = *pos & ~PAGE_MASK;
2448                 size_t bytes = PAGE_SIZE - off;
2449                 unsigned long pfn;
2450                 struct page *p;
2451                 void *ptr;
2452
2453                 bytes = bytes < size ? bytes : size;
2454
2455                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2456
2457                 pfn = addr >> PAGE_SHIFT;
2458                 if (!pfn_valid(pfn))
2459                         return -EPERM;
2460
2461                 p = pfn_to_page(pfn);
2462                 if (p->mapping != adev->mman.bdev.dev_mapping)
2463                         return -EPERM;
2464
2465                 ptr = kmap(p);
2466                 r = copy_from_user(ptr + off, buf, bytes);
2467                 kunmap(p);
2468                 if (r)
2469                         return -EFAULT;
2470
2471                 size -= bytes;
2472                 *pos += bytes;
2473                 result += bytes;
2474         }
2475
2476         return result;
2477 }
2478
2479 static const struct file_operations amdgpu_ttm_iomem_fops = {
2480         .owner = THIS_MODULE,
2481         .read = amdgpu_iomem_read,
2482         .write = amdgpu_iomem_write,
2483         .llseek = default_llseek
2484 };
2485
2486 static const struct {
2487         char *name;
2488         const struct file_operations *fops;
2489         int domain;
2490 } ttm_debugfs_entries[] = {
2491         { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2492 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2493         { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2494 #endif
2495         { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2496 };
2497
2498 #endif
2499
2500 int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2501 {
2502 #if defined(CONFIG_DEBUG_FS)
2503         unsigned count;
2504
2505         struct drm_minor *minor = adev_to_drm(adev)->primary;
2506         struct dentry *ent, *root = minor->debugfs_root;
2507
2508         for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2509                 ent = debugfs_create_file(
2510                                 ttm_debugfs_entries[count].name,
2511                                 S_IFREG | S_IRUGO, root,
2512                                 adev,
2513                                 ttm_debugfs_entries[count].fops);
2514                 if (IS_ERR(ent))
2515                         return PTR_ERR(ent);
2516                 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2517                         i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2518                 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2519                         i_size_write(ent->d_inode, adev->gmc.gart_size);
2520                 adev->mman.debugfs_entries[count] = ent;
2521         }
2522
2523         count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2524         return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2525 #else
2526         return 0;
2527 #endif
2528 }