drm/amdgpu/gmc: steal the appropriate amount of vram for fw hand-over (v3)
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_ttm.c
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32 #include <drm/ttm/ttm_bo_api.h>
33 #include <drm/ttm/ttm_bo_driver.h>
34 #include <drm/ttm/ttm_placement.h>
35 #include <drm/ttm/ttm_module.h>
36 #include <drm/ttm/ttm_page_alloc.h>
37 #include <drm/drmP.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swiotlb.h>
42 #include <linux/swap.h>
43 #include <linux/pagemap.h>
44 #include <linux/debugfs.h>
45 #include <linux/iommu.h>
46 #include "amdgpu.h"
47 #include "amdgpu_object.h"
48 #include "amdgpu_trace.h"
49 #include "amdgpu_amdkfd.h"
50 #include "bif/bif_4_1_d.h"
51
52 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
53
54 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
55                              struct ttm_mem_reg *mem, unsigned num_pages,
56                              uint64_t offset, unsigned window,
57                              struct amdgpu_ring *ring,
58                              uint64_t *addr);
59
60 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
61 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
62
63 /*
64  * Global memory.
65  */
66 static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
67 {
68         return ttm_mem_global_init(ref->object);
69 }
70
71 static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
72 {
73         ttm_mem_global_release(ref->object);
74 }
75
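/**
 * amdgpu_ttm_global_init - Set up the global TTM state
 *
 * References the global TTM memory and BO state, initializes the GTT
 * window lock and creates the scheduler entity used for buffer moves
 * on the buffer_funcs ring.
 */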
76 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
77 {
78         struct drm_global_reference *global_ref;
79         struct amdgpu_ring *ring;
80         struct drm_sched_rq *rq;
81         int r;
82
83         adev->mman.mem_global_referenced = false;
84         global_ref = &adev->mman.mem_global_ref;
85         global_ref->global_type = DRM_GLOBAL_TTM_MEM;
86         global_ref->size = sizeof(struct ttm_mem_global);
87         global_ref->init = &amdgpu_ttm_mem_global_init;
88         global_ref->release = &amdgpu_ttm_mem_global_release;
89         r = drm_global_item_ref(global_ref);
90         if (r) {
91                 DRM_ERROR("Failed setting up TTM memory accounting "
92                           "subsystem.\n");
93                 goto error_mem;
94         }
95
96         adev->mman.bo_global_ref.mem_glob =
97                 adev->mman.mem_global_ref.object;
98         global_ref = &adev->mman.bo_global_ref.ref;
99         global_ref->global_type = DRM_GLOBAL_TTM_BO;
100         global_ref->size = sizeof(struct ttm_bo_global);
101         global_ref->init = &ttm_bo_global_init;
102         global_ref->release = &ttm_bo_global_release;
103         r = drm_global_item_ref(global_ref);
104         if (r) {
105                 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
106                 goto error_bo;
107         }
108
109         mutex_init(&adev->mman.gtt_window_lock);
110
111         ring = adev->mman.buffer_funcs_ring;
112         rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
113         r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
114                                   rq, amdgpu_sched_jobs, NULL);
115         if (r) {
116                 DRM_ERROR("Failed setting up TTM BO move run queue.\n");
117                 goto error_entity;
118         }
119
120         adev->mman.mem_global_referenced = true;
121
122         return 0;
123
124 error_entity:
125         drm_global_item_unref(&adev->mman.bo_global_ref.ref);
126 error_bo:
127         drm_global_item_unref(&adev->mman.mem_global_ref);
128 error_mem:
129         return r;
130 }
131
132 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
133 {
134         if (adev->mman.mem_global_referenced) {
135                 drm_sched_entity_fini(adev->mman.entity.sched,
136                                       &adev->mman.entity);
137                 mutex_destroy(&adev->mman.gtt_window_lock);
138                 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
139                 drm_global_item_unref(&adev->mman.mem_global_ref);
140                 adev->mman.mem_global_referenced = false;
141         }
142 }
143
144 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
145 {
146         return 0;
147 }
148
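/**
 * amdgpu_init_mem_type - Initialize a memory type manager
 *
 * Sets up the manager functions, caching options and flags for each
 * placement amdgpu uses: SYSTEM, TT (GART), VRAM and the on-chip
 * GDS/GWS/OA pools.
 */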
149 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
150                                 struct ttm_mem_type_manager *man)
151 {
152         struct amdgpu_device *adev;
153
154         adev = amdgpu_ttm_adev(bdev);
155
156         switch (type) {
157         case TTM_PL_SYSTEM:
158                 /* System memory */
159                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
160                 man->available_caching = TTM_PL_MASK_CACHING;
161                 man->default_caching = TTM_PL_FLAG_CACHED;
162                 break;
163         case TTM_PL_TT:
164                 man->func = &amdgpu_gtt_mgr_func;
165                 man->gpu_offset = adev->gmc.gart_start;
166                 man->available_caching = TTM_PL_MASK_CACHING;
167                 man->default_caching = TTM_PL_FLAG_CACHED;
168                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
169                 break;
170         case TTM_PL_VRAM:
171                 /* "On-card" video ram */
172                 man->func = &amdgpu_vram_mgr_func;
173                 man->gpu_offset = adev->gmc.vram_start;
174                 man->flags = TTM_MEMTYPE_FLAG_FIXED |
175                              TTM_MEMTYPE_FLAG_MAPPABLE;
176                 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
177                 man->default_caching = TTM_PL_FLAG_WC;
178                 break;
179         case AMDGPU_PL_GDS:
180         case AMDGPU_PL_GWS:
181         case AMDGPU_PL_OA:
182                 /* On-chip GDS, GWS and OA memory */
183                 man->func = &ttm_bo_manager_func;
184                 man->gpu_offset = 0;
185                 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
186                 man->available_caching = TTM_PL_FLAG_UNCACHED;
187                 man->default_caching = TTM_PL_FLAG_UNCACHED;
188                 break;
189         default:
190                 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
191                 return -EINVAL;
192         }
193         return 0;
194 }
195
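/**
 * amdgpu_evict_flags - Compute placement flags for an eviction
 *
 * Fills @placement with the domains the BO may be moved to on eviction:
 * VRAM BOs preferably go to the CPU-invisible part of VRAM or to GTT,
 * everything else falls back to CPU (system) memory.
 */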
196 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
197                                 struct ttm_placement *placement)
198 {
199         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
200         struct amdgpu_bo *abo;
201         static const struct ttm_place placements = {
202                 .fpfn = 0,
203                 .lpfn = 0,
204                 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
205         };
206
207         if (bo->type == ttm_bo_type_sg) {
208                 placement->num_placement = 0;
209                 placement->num_busy_placement = 0;
210                 return;
211         }
212
213         if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
214                 placement->placement = &placements;
215                 placement->busy_placement = &placements;
216                 placement->num_placement = 1;
217                 placement->num_busy_placement = 1;
218                 return;
219         }
220         abo = ttm_to_amdgpu_bo(bo);
221         switch (bo->mem.mem_type) {
222         case TTM_PL_VRAM:
223                 if (!adev->mman.buffer_funcs_enabled) {
224                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
225                 } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
226                            !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
227                            amdgpu_bo_in_cpu_visible_vram(abo)) {
228
229                         /* Try evicting to the CPU inaccessible part of VRAM
230                          * first, but only set GTT as busy placement, so this
231                          * BO will be evicted to GTT rather than causing other
232                          * BOs to be evicted from VRAM
233                          */
234                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
235                                                          AMDGPU_GEM_DOMAIN_GTT);
236                         abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
237                         abo->placements[0].lpfn = 0;
238                         abo->placement.busy_placement = &abo->placements[1];
239                         abo->placement.num_busy_placement = 1;
240                 } else {
241                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
242                 }
243                 break;
244         case TTM_PL_TT:
245         default:
246                 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
247         }
248         *placement = abo->placement;
249 }
250
251 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
252 {
253         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
254
255         /*
256          * Don't verify access for KFD BOs. They don't have a GEM
257          * object associated with them.
258          */
259         if (abo->kfd_bo)
260                 return 0;
261
262         if (amdgpu_ttm_tt_get_usermm(bo->ttm))
263                 return -EPERM;
264         return drm_vma_node_verify_access(&abo->gem_base.vma_node,
265                                           filp->private_data);
266 }
267
268 static void amdgpu_move_null(struct ttm_buffer_object *bo,
269                              struct ttm_mem_reg *new_mem)
270 {
271         struct ttm_mem_reg *old_mem = &bo->mem;
272
273         BUG_ON(old_mem->mm_node != NULL);
274         *old_mem = *new_mem;
275         new_mem->mm_node = NULL;
276 }
277
278 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
279                                     struct drm_mm_node *mm_node,
280                                     struct ttm_mem_reg *mem)
281 {
282         uint64_t addr = 0;
283
284         if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
285                 addr = mm_node->start << PAGE_SHIFT;
286                 addr += bo->bdev->man[mem->mem_type].gpu_offset;
287         }
288         return addr;
289 }
290
291 /**
292  * amdgpu_find_mm_node - Helper function that finds the drm_mm_node
293  *  corresponding to @offset. It also adjusts @offset to be
294  *  within the drm_mm_node returned.
295  */
296 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
297                                                unsigned long *offset)
298 {
299         struct drm_mm_node *mm_node = mem->mm_node;
300
301         while (*offset >= (mm_node->size << PAGE_SHIFT)) {
302                 *offset -= (mm_node->size << PAGE_SHIFT);
303                 ++mm_node;
304         }
305         return mm_node;
306 }
307
308 /**
309  * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
310  *
311  * The function copies @size bytes from {src->mem + src->offset} to
312  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
313  * move and different for a BO to BO copy.
314  *
315  * @f: Returns the last fence if multiple jobs are submitted.
316  */
317 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
318                                struct amdgpu_copy_mem *src,
319                                struct amdgpu_copy_mem *dst,
320                                uint64_t size,
321                                struct reservation_object *resv,
322                                struct dma_fence **f)
323 {
324         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
325         struct drm_mm_node *src_mm, *dst_mm;
326         uint64_t src_node_start, dst_node_start, src_node_size,
327                  dst_node_size, src_page_offset, dst_page_offset;
328         struct dma_fence *fence = NULL;
329         int r = 0;
330         const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
331                                         AMDGPU_GPU_PAGE_SIZE);
332
333         if (!adev->mman.buffer_funcs_enabled) {
334                 DRM_ERROR("Trying to move memory with ring turned off.\n");
335                 return -EINVAL;
336         }
337
338         src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
339         src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
340                                              src->offset;
341         src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
342         src_page_offset = src_node_start & (PAGE_SIZE - 1);
343
344         dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
345         dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
346                                              dst->offset;
347         dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
348         dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
349
350         mutex_lock(&adev->mman.gtt_window_lock);
351
352         while (size) {
353                 unsigned long cur_size;
354                 uint64_t from = src_node_start, to = dst_node_start;
355                 struct dma_fence *next;
356
357                 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
358                  * begins at an offset, then adjust the size accordingly
359                  */
360                 cur_size = min3(min(src_node_size, dst_node_size), size,
361                                 GTT_MAX_BYTES);
362                 if (cur_size + src_page_offset > GTT_MAX_BYTES ||
363                     cur_size + dst_page_offset > GTT_MAX_BYTES)
364                         cur_size -= max(src_page_offset, dst_page_offset);
365
366                 /* Map only what needs to be accessed. Map src to window 0 and
367                  * dst to window 1
368                  */
369                 if (src->mem->mem_type == TTM_PL_TT &&
370                     !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
371                         r = amdgpu_map_buffer(src->bo, src->mem,
372                                         PFN_UP(cur_size + src_page_offset),
373                                         src_node_start, 0, ring,
374                                         &from);
375                         if (r)
376                                 goto error;
377                         /* Adjust the offset because amdgpu_map_buffer returns
378                          * the start of the mapped page
379                          */
380                         from += src_page_offset;
381                 }
382
383                 if (dst->mem->mem_type == TTM_PL_TT &&
384                     !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
385                         r = amdgpu_map_buffer(dst->bo, dst->mem,
386                                         PFN_UP(cur_size + dst_page_offset),
387                                         dst_node_start, 1, ring,
388                                         &to);
389                         if (r)
390                                 goto error;
391                         to += dst_page_offset;
392                 }
393
394                 r = amdgpu_copy_buffer(ring, from, to, cur_size,
395                                        resv, &next, false, true);
396                 if (r)
397                         goto error;
398
399                 dma_fence_put(fence);
400                 fence = next;
401
402                 size -= cur_size;
403                 if (!size)
404                         break;
405
406                 src_node_size -= cur_size;
407                 if (!src_node_size) {
408                         src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
409                                                              src->mem);
410                         src_node_size = (src_mm->size << PAGE_SHIFT);
411                 } else {
412                         src_node_start += cur_size;
413                         src_page_offset = src_node_start & (PAGE_SIZE - 1);
414                 }
415                 dst_node_size -= cur_size;
416                 if (!dst_node_size) {
417                         dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
418                                                              dst->mem);
419                         dst_node_size = (dst_mm->size << PAGE_SHIFT);
420                 } else {
421                         dst_node_start += cur_size;
422                         dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
423                 }
424         }
425 error:
426         mutex_unlock(&adev->mman.gtt_window_lock);
427         if (f)
428                 *f = dma_fence_get(fence);
429         dma_fence_put(fence);
430         return r;
431 }
432
433
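/**
 * amdgpu_move_blit - Move a BO with the copy engine
 *
 * Copies the whole BO from @old_mem to @new_mem with
 * amdgpu_ttm_copy_mem_to_mem() and completes the move with
 * ttm_bo_pipeline_move().
 */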
434 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
435                             bool evict, bool no_wait_gpu,
436                             struct ttm_mem_reg *new_mem,
437                             struct ttm_mem_reg *old_mem)
438 {
439         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
440         struct amdgpu_copy_mem src, dst;
441         struct dma_fence *fence = NULL;
442         int r;
443
444         src.bo = bo;
445         dst.bo = bo;
446         src.mem = old_mem;
447         dst.mem = new_mem;
448         src.offset = 0;
449         dst.offset = 0;
450
451         r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
452                                        new_mem->num_pages << PAGE_SHIFT,
453                                        bo->resv, &fence);
454         if (r)
455                 goto error;
456
457         r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
458         dma_fence_put(fence);
459         return r;
460
461 error:
462         if (fence)
463                 dma_fence_wait(fence, false);
464         dma_fence_put(fence);
465         return r;
466 }
467
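/**
 * amdgpu_move_vram_ram - Move a BO from VRAM to system memory
 *
 * Blits the BO into a temporary GTT placement first and then lets TTM
 * finish the move into the final system-memory location.
 */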
468 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
469                                 struct ttm_operation_ctx *ctx,
470                                 struct ttm_mem_reg *new_mem)
471 {
472         struct amdgpu_device *adev;
473         struct ttm_mem_reg *old_mem = &bo->mem;
474         struct ttm_mem_reg tmp_mem;
475         struct ttm_place placements;
476         struct ttm_placement placement;
477         int r;
478
479         adev = amdgpu_ttm_adev(bo->bdev);
480         tmp_mem = *new_mem;
481         tmp_mem.mm_node = NULL;
482         placement.num_placement = 1;
483         placement.placement = &placements;
484         placement.num_busy_placement = 1;
485         placement.busy_placement = &placements;
486         placements.fpfn = 0;
487         placements.lpfn = 0;
488         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
489         r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
490         if (unlikely(r)) {
491                 return r;
492         }
493
494         r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
495         if (unlikely(r)) {
496                 goto out_cleanup;
497         }
498
499         r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
500         if (unlikely(r)) {
501                 goto out_cleanup;
502         }
503         r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
504         if (unlikely(r)) {
505                 goto out_cleanup;
506         }
507         r = ttm_bo_move_ttm(bo, ctx, new_mem);
508 out_cleanup:
509         ttm_bo_mem_put(bo, &tmp_mem);
510         return r;
511 }
512
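/**
 * amdgpu_move_ram_vram - Move a BO from system memory to VRAM
 *
 * Binds the BO to a temporary GTT placement first and then blits it
 * into its final VRAM location.
 */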
513 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
514                                 struct ttm_operation_ctx *ctx,
515                                 struct ttm_mem_reg *new_mem)
516 {
517         struct amdgpu_device *adev;
518         struct ttm_mem_reg *old_mem = &bo->mem;
519         struct ttm_mem_reg tmp_mem;
520         struct ttm_placement placement;
521         struct ttm_place placements;
522         int r;
523
524         adev = amdgpu_ttm_adev(bo->bdev);
525         tmp_mem = *new_mem;
526         tmp_mem.mm_node = NULL;
527         placement.num_placement = 1;
528         placement.placement = &placements;
529         placement.num_busy_placement = 1;
530         placement.busy_placement = &placements;
531         placements.fpfn = 0;
532         placements.lpfn = 0;
533         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
534         r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
535         if (unlikely(r)) {
536                 return r;
537         }
538         r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
539         if (unlikely(r)) {
540                 goto out_cleanup;
541         }
542         r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
543         if (unlikely(r)) {
544                 goto out_cleanup;
545         }
546 out_cleanup:
547         ttm_bo_mem_put(bo, &tmp_mem);
548         return r;
549 }
550
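/**
 * amdgpu_bo_move - TTM move callback
 *
 * Picks the cheapest way to carry out a move: a NULL move when only a
 * bind is needed, a copy-engine blit when the buffer functions are
 * enabled, and a CPU memcpy as the last resort.
 */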
551 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
552                           struct ttm_operation_ctx *ctx,
553                           struct ttm_mem_reg *new_mem)
554 {
555         struct amdgpu_device *adev;
556         struct amdgpu_bo *abo;
557         struct ttm_mem_reg *old_mem = &bo->mem;
558         int r;
559
560         /* Can't move a pinned BO */
561         abo = ttm_to_amdgpu_bo(bo);
562         if (WARN_ON_ONCE(abo->pin_count > 0))
563                 return -EINVAL;
564
565         adev = amdgpu_ttm_adev(bo->bdev);
566
567         if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
568                 amdgpu_move_null(bo, new_mem);
569                 return 0;
570         }
571         if ((old_mem->mem_type == TTM_PL_TT &&
572              new_mem->mem_type == TTM_PL_SYSTEM) ||
573             (old_mem->mem_type == TTM_PL_SYSTEM &&
574              new_mem->mem_type == TTM_PL_TT)) {
575                 /* bind is enough */
576                 amdgpu_move_null(bo, new_mem);
577                 return 0;
578         }
579
580         if (!adev->mman.buffer_funcs_enabled)
581                 goto memcpy;
582
583         if (old_mem->mem_type == TTM_PL_VRAM &&
584             new_mem->mem_type == TTM_PL_SYSTEM) {
585                 r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
586         } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
587                    new_mem->mem_type == TTM_PL_VRAM) {
588                 r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
589         } else {
590                 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
591                                      new_mem, old_mem);
592         }
593
594         if (r) {
595 memcpy:
596                 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
597                 if (r) {
598                         return r;
599                 }
600         }
601
602         if (bo->type == ttm_bo_type_device &&
603             new_mem->mem_type == TTM_PL_VRAM &&
604             old_mem->mem_type != TTM_PL_VRAM) {
605                 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
606                  * accesses the BO after it's moved.
607                  */
608                 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
609         }
610
611         /* update statistics */
612         atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
613         return 0;
614 }
615
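/**
 * amdgpu_ttm_io_mem_reserve - Reserve I/O space for CPU access
 *
 * Fills in the bus placement needed to map the memory for CPU access;
 * only VRAM that lies within the CPU-visible aperture can be mapped.
 */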
616 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
617 {
618         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
619         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
620         struct drm_mm_node *mm_node = mem->mm_node;
621
622         mem->bus.addr = NULL;
623         mem->bus.offset = 0;
624         mem->bus.size = mem->num_pages << PAGE_SHIFT;
625         mem->bus.base = 0;
626         mem->bus.is_iomem = false;
627         if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
628                 return -EINVAL;
629         switch (mem->mem_type) {
630         case TTM_PL_SYSTEM:
631                 /* system memory */
632                 return 0;
633         case TTM_PL_TT:
634                 break;
635         case TTM_PL_VRAM:
636                 mem->bus.offset = mem->start << PAGE_SHIFT;
637                 /* check if it's visible */
638                 if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
639                         return -EINVAL;
640                 /* Only physically contiguous buffers get a CPU address
641                  * from the aperture mapping here. In a contiguous buffer,
642                  * the size of the first mm_node matches the number of
642                  * pages in the ttm_mem_reg.
643                  */
644                 if (adev->mman.aper_base_kaddr &&
645                     (mm_node->size == mem->num_pages))
646                         mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
647                                         mem->bus.offset;
648
649                 mem->bus.base = adev->gmc.aper_base;
650                 mem->bus.is_iomem = true;
651                 break;
652         default:
653                 return -EINVAL;
654         }
655         return 0;
656 }
657
658 static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
659 {
660 }
661
662 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
663                                            unsigned long page_offset)
664 {
665         struct drm_mm_node *mm;
666         unsigned long offset = (page_offset << PAGE_SHIFT);
667
668         mm = amdgpu_find_mm_node(&bo->mem, &offset);
669         return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
670                 (offset >> PAGE_SHIFT);
671 }
672
673 /*
674  * TTM backend functions.
675  */
676 struct amdgpu_ttm_gup_task_list {
677         struct list_head        list;
678         struct task_struct      *task;
679 };
680
681 struct amdgpu_ttm_tt {
682         struct ttm_dma_tt       ttm;
683         u64                     offset;
684         uint64_t                userptr;
685         struct mm_struct        *usermm;
686         uint32_t                userflags;
687         spinlock_t              guptasklock;
688         struct list_head        guptasks;
689         atomic_t                mmu_invalidations;
690         uint32_t                last_set_pages;
691 };
692
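/**
 * amdgpu_ttm_tt_get_user_pages - Pin the pages backing a userptr BO
 *
 * Calls get_user_pages() in chunks, recording the calling task in
 * gtt->guptasks so that concurrent MMU invalidations can be detected.
 */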
693 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
694 {
695         struct amdgpu_ttm_tt *gtt = (void *)ttm;
696         unsigned int flags = 0;
697         unsigned pinned = 0;
698         int r;
699
700         if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
701                 flags |= FOLL_WRITE;
702
703         down_read(&current->mm->mmap_sem);
704
705         if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
706                 /* check that we only use anonymous memory
707                    to prevent problems with writeback */
708                 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
709                 struct vm_area_struct *vma;
710
711                 vma = find_vma(gtt->usermm, gtt->userptr);
712                 if (!vma || vma->vm_file || vma->vm_end < end) {
713                         up_read(&current->mm->mmap_sem);
714                         return -EPERM;
715                 }
716         }
717
718         do {
719                 unsigned num_pages = ttm->num_pages - pinned;
720                 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
721                 struct page **p = pages + pinned;
722                 struct amdgpu_ttm_gup_task_list guptask;
723
724                 guptask.task = current;
725                 spin_lock(&gtt->guptasklock);
726                 list_add(&guptask.list, &gtt->guptasks);
727                 spin_unlock(&gtt->guptasklock);
728
729                 r = get_user_pages(userptr, num_pages, flags, p, NULL);
730
731                 spin_lock(&gtt->guptasklock);
732                 list_del(&guptask.list);
733                 spin_unlock(&gtt->guptasklock);
734
735                 if (r < 0)
736                         goto release_pages;
737
738                 pinned += r;
739
740         } while (pinned < ttm->num_pages);
741
742         up_read(&current->mm->mmap_sem);
743         return 0;
744
745 release_pages:
746         release_pages(pages, pinned);
747         up_read(&current->mm->mmap_sem);
748         return r;
749 }
750
751 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
752 {
753         struct amdgpu_ttm_tt *gtt = (void *)ttm;
754         unsigned i;
755
756         gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
757         for (i = 0; i < ttm->num_pages; ++i) {
758                 if (ttm->pages[i])
759                         put_page(ttm->pages[i]);
760
761                 ttm->pages[i] = pages ? pages[i] : NULL;
762         }
763 }
764
765 void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
766 {
767         struct amdgpu_ttm_tt *gtt = (void *)ttm;
768         unsigned i;
769
770         for (i = 0; i < ttm->num_pages; ++i) {
771                 struct page *page = ttm->pages[i];
772
773                 if (!page)
774                         continue;
775
776                 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
777                         set_page_dirty(page);
778
779                 mark_page_accessed(page);
780         }
781 }
782
783 /* prepare the sg table with the user pages */
784 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
785 {
786         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
787         struct amdgpu_ttm_tt *gtt = (void *)ttm;
788         unsigned nents;
789         int r;
790
791         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
792         enum dma_data_direction direction = write ?
793                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
794
795         r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
796                                       ttm->num_pages << PAGE_SHIFT,
797                                       GFP_KERNEL);
798         if (r)
799                 goto release_sg;
800
801         r = -ENOMEM;
802         nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
803         if (nents != ttm->sg->nents)
804                 goto release_sg;
805
806         drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
807                                          gtt->ttm.dma_address, ttm->num_pages);
808
809         return 0;
810
811 release_sg:
812         kfree(ttm->sg);
813         return r;
814 }
815
816 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
817 {
818         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
819         struct amdgpu_ttm_tt *gtt = (void *)ttm;
820
821         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
822         enum dma_data_direction direction = write ?
823                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
824
825         /* double check that we don't free the table twice */
826         if (!ttm->sg->sgl)
827                 return;
828
829         /* free the sg table and pages again */
830         dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
831
832         amdgpu_ttm_tt_mark_user_pages(ttm);
833
834         sg_free_table(ttm->sg);
835 }
836
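/**
 * amdgpu_ttm_backend_bind - Bind a ttm_tt object into the GART
 *
 * Pins userptr pages if necessary and, when the placement already has a
 * valid GART address, writes the GART entries with amdgpu_gart_bind().
 */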
837 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
838                                    struct ttm_mem_reg *bo_mem)
839 {
840         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
841         struct amdgpu_ttm_tt *gtt = (void*)ttm;
842         uint64_t flags;
843         int r = 0;
844
845         if (gtt->userptr) {
846                 r = amdgpu_ttm_tt_pin_userptr(ttm);
847                 if (r) {
848                         DRM_ERROR("failed to pin userptr\n");
849                         return r;
850                 }
851         }
852         if (!ttm->num_pages) {
853                 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
854                      ttm->num_pages, bo_mem, ttm);
855         }
856
857         if (bo_mem->mem_type == AMDGPU_PL_GDS ||
858             bo_mem->mem_type == AMDGPU_PL_GWS ||
859             bo_mem->mem_type == AMDGPU_PL_OA)
860                 return -EINVAL;
861
862         if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
863                 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
864                 return 0;
865         }
866
867         flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
868         gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
869         r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
870                 ttm->pages, gtt->ttm.dma_address, flags);
871
872         if (r)
873                 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
874                           ttm->num_pages, gtt->offset);
875         return r;
876 }
877
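/**
 * amdgpu_ttm_alloc_gart - Make sure a TT BO has a GART address
 *
 * Allocates a range inside the GART aperture and binds the BO there if
 * it does not already have a valid GART address.
 */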
878 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
879 {
880         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
881         struct ttm_operation_ctx ctx = { false, false };
882         struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
883         struct ttm_mem_reg tmp;
884         struct ttm_placement placement;
885         struct ttm_place placements;
886         uint64_t flags;
887         int r;
888
889         if (bo->mem.mem_type != TTM_PL_TT ||
890             amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
891                 return 0;
892
893         tmp = bo->mem;
894         tmp.mm_node = NULL;
895         placement.num_placement = 1;
896         placement.placement = &placements;
897         placement.num_busy_placement = 1;
898         placement.busy_placement = &placements;
899         placements.fpfn = 0;
900         placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
901         placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
902                 TTM_PL_FLAG_TT;
903
904         r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
905         if (unlikely(r))
906                 return r;
907
908         flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
909         gtt->offset = (u64)tmp.start << PAGE_SHIFT;
910         r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
911                              bo->ttm->pages, gtt->ttm.dma_address, flags);
912         if (unlikely(r)) {
913                 ttm_bo_mem_put(bo, &tmp);
914                 return r;
915         }
916
917         ttm_bo_mem_put(bo, &bo->mem);
918         bo->mem = tmp;
919         bo->offset = (bo->mem.start << PAGE_SHIFT) +
920                 bo->bdev->man[bo->mem.mem_type].gpu_offset;
921
922         return 0;
923 }
924
925 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
926 {
927         struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
928         struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
929         uint64_t flags;
930         int r;
931
932         if (!gtt)
933                 return 0;
934
935         flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
936         r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
937                              gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
938         if (r)
939                 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
940                           gtt->ttm.ttm.num_pages, gtt->offset);
941         return r;
942 }
943
944 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
945 {
946         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
947         struct amdgpu_ttm_tt *gtt = (void *)ttm;
948         int r;
949
950         if (gtt->userptr)
951                 amdgpu_ttm_tt_unpin_userptr(ttm);
952
953         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
954                 return 0;
955
956         /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
957         r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
958         if (r)
959                 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
960                           gtt->ttm.ttm.num_pages, gtt->offset);
961         return r;
962 }
963
964 static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
965 {
966         struct amdgpu_ttm_tt *gtt = (void *)ttm;
967
968         ttm_dma_tt_fini(&gtt->ttm);
969         kfree(gtt);
970 }
971
972 static struct ttm_backend_func amdgpu_backend_func = {
973         .bind = &amdgpu_ttm_backend_bind,
974         .unbind = &amdgpu_ttm_backend_unbind,
975         .destroy = &amdgpu_ttm_backend_destroy,
976 };
977
978 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
979                                            uint32_t page_flags)
980 {
981         struct amdgpu_device *adev;
982         struct amdgpu_ttm_tt *gtt;
983
984         adev = amdgpu_ttm_adev(bo->bdev);
985
986         gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
987         if (gtt == NULL) {
988                 return NULL;
989         }
990         gtt->ttm.ttm.func = &amdgpu_backend_func;
991         if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
992                 kfree(gtt);
993                 return NULL;
994         }
995         return &gtt->ttm.ttm;
996 }
997
998 static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
999                         struct ttm_operation_ctx *ctx)
1000 {
1001         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1002         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1003         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1004
1005         if (gtt && gtt->userptr) {
1006                 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1007                 if (!ttm->sg)
1008                         return -ENOMEM;
1009
1010                 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1011                 ttm->state = tt_unbound;
1012                 return 0;
1013         }
1014
1015         if (slave && ttm->sg) {
1016                 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1017                                                  gtt->ttm.dma_address,
1018                                                  ttm->num_pages);
1019                 ttm->state = tt_unbound;
1020                 return 0;
1021         }
1022
1023 #ifdef CONFIG_SWIOTLB
1024         if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1025                 return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
1026         }
1027 #endif
1028
1029         return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
1030 }
1031
1032 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1033 {
1034         struct amdgpu_device *adev;
1035         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1036         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1037
1038         if (gtt && gtt->userptr) {
1039                 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1040                 kfree(ttm->sg);
1041                 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1042                 return;
1043         }
1044
1045         if (slave)
1046                 return;
1047
1048         adev = amdgpu_ttm_adev(ttm->bdev);
1049
1050 #ifdef CONFIG_SWIOTLB
1051         if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1052                 ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1053                 return;
1054         }
1055 #endif
1056
1057         ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
1058 }
1059
1060 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1061                               uint32_t flags)
1062 {
1063         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1064
1065         if (gtt == NULL)
1066                 return -EINVAL;
1067
1068         gtt->userptr = addr;
1069         gtt->usermm = current->mm;
1070         gtt->userflags = flags;
1071         spin_lock_init(&gtt->guptasklock);
1072         INIT_LIST_HEAD(&gtt->guptasks);
1073         atomic_set(&gtt->mmu_invalidations, 0);
1074         gtt->last_set_pages = 0;
1075
1076         return 0;
1077 }
1078
1079 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1080 {
1081         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1082
1083         if (gtt == NULL)
1084                 return NULL;
1085
1086         return gtt->usermm;
1087 }
1088
1089 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1090                                   unsigned long end)
1091 {
1092         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1093         struct amdgpu_ttm_gup_task_list *entry;
1094         unsigned long size;
1095
1096         if (gtt == NULL || !gtt->userptr)
1097                 return false;
1098
1099         size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1100         if (gtt->userptr > end || gtt->userptr + size <= start)
1101                 return false;
1102
1103         spin_lock(&gtt->guptasklock);
1104         list_for_each_entry(entry, &gtt->guptasks, list) {
1105                 if (entry->task == current) {
1106                         spin_unlock(&gtt->guptasklock);
1107                         return false;
1108                 }
1109         }
1110         spin_unlock(&gtt->guptasklock);
1111
1112         atomic_inc(&gtt->mmu_invalidations);
1113
1114         return true;
1115 }
1116
1117 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
1118                                        int *last_invalidated)
1119 {
1120         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1121         int prev_invalidated = *last_invalidated;
1122
1123         *last_invalidated = atomic_read(&gtt->mmu_invalidations);
1124         return prev_invalidated != *last_invalidated;
1125 }
1126
1127 bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
1128 {
1129         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1130
1131         if (gtt == NULL || !gtt->userptr)
1132                 return false;
1133
1134         return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
1135 }
1136
1137 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1138 {
1139         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1140
1141         if (gtt == NULL)
1142                 return false;
1143
1144         return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1145 }
1146
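/**
 * amdgpu_ttm_tt_pte_flags - Compute the GART PTE flags for a placement
 *
 * Builds the VALID/SYSTEM/SNOOPED/READABLE/WRITEABLE flag combination
 * matching the given memory type and caching state.
 */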
1147 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1148                                  struct ttm_mem_reg *mem)
1149 {
1150         uint64_t flags = 0;
1151
1152         if (mem && mem->mem_type != TTM_PL_SYSTEM)
1153                 flags |= AMDGPU_PTE_VALID;
1154
1155         if (mem && mem->mem_type == TTM_PL_TT) {
1156                 flags |= AMDGPU_PTE_SYSTEM;
1157
1158                 if (ttm->caching_state == tt_cached)
1159                         flags |= AMDGPU_PTE_SNOOPED;
1160         }
1161
1162         flags |= adev->gart.gart_pte_flags;
1163         flags |= AMDGPU_PTE_READABLE;
1164
1165         if (!amdgpu_ttm_tt_is_readonly(ttm))
1166                 flags |= AMDGPU_PTE_WRITEABLE;
1167
1168         return flags;
1169 }
1170
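/**
 * amdgpu_ttm_bo_eviction_valuable - Check whether evicting a BO helps
 *
 * BOs fenced by a KFD process using the current mm are never evicted;
 * VRAM BOs are checked node by node against the requested range.
 */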
1171 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1172                                             const struct ttm_place *place)
1173 {
1174         unsigned long num_pages = bo->mem.num_pages;
1175         struct drm_mm_node *node = bo->mem.mm_node;
1176         struct reservation_object_list *flist;
1177         struct dma_fence *f;
1178         int i;
1179
1180         /* If bo is a KFD BO, check if the bo belongs to the current process.
1181          * If true, then return false as any KFD process needs all its BOs to
1182          * be resident to run successfully
1183          */
1184         flist = reservation_object_get_list(bo->resv);
1185         if (flist) {
1186                 for (i = 0; i < flist->shared_count; ++i) {
1187                         f = rcu_dereference_protected(flist->shared[i],
1188                                 reservation_object_held(bo->resv));
1189                         if (amdkfd_fence_check_mm(f, current->mm))
1190                                 return false;
1191                 }
1192         }
1193
1194         switch (bo->mem.mem_type) {
1195         case TTM_PL_TT:
1196                 return true;
1197
1198         case TTM_PL_VRAM:
1199                 /* Check each drm MM node individually */
1200                 while (num_pages) {
1201                         if (place->fpfn < (node->start + node->size) &&
1202                             !(place->lpfn && place->lpfn <= node->start))
1203                                 return true;
1204
1205                         num_pages -= node->size;
1206                         ++node;
1207                 }
1208                 return false;
1209
1210         default:
1211                 break;
1212         }
1213
1214         return ttm_bo_eviction_valuable(bo, place);
1215 }
1216
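/**
 * amdgpu_ttm_access_memory - Read or write BO memory in VRAM
 *
 * Implements the TTM access_memory callback by going through the
 * MM_INDEX/MM_DATA register window, four bytes at a time.
 */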
1217 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1218                                     unsigned long offset,
1219                                     void *buf, int len, int write)
1220 {
1221         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1222         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1223         struct drm_mm_node *nodes;
1224         uint32_t value = 0;
1225         int ret = 0;
1226         uint64_t pos;
1227         unsigned long flags;
1228
1229         if (bo->mem.mem_type != TTM_PL_VRAM)
1230                 return -EIO;
1231
1232         nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1233         pos = (nodes->start << PAGE_SHIFT) + offset;
1234
1235         while (len && pos < adev->gmc.mc_vram_size) {
1236                 uint64_t aligned_pos = pos & ~(uint64_t)3;
1237                 uint32_t bytes = 4 - (pos & 3);
1238                 uint32_t shift = (pos & 3) * 8;
1239                 uint32_t mask = 0xffffffff << shift;
1240
1241                 if (len < bytes) {
1242                         mask &= 0xffffffff >> (bytes - len) * 8;
1243                         bytes = len;
1244                 }
1245
1246                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1247                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1248                 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1249                 if (!write || mask != 0xffffffff)
1250                         value = RREG32_NO_KIQ(mmMM_DATA);
1251                 if (write) {
1252                         value &= ~mask;
1253                         value |= (*(uint32_t *)buf << shift) & mask;
1254                         WREG32_NO_KIQ(mmMM_DATA, value);
1255                 }
1256                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1257                 if (!write) {
1258                         value = (value & mask) >> shift;
1259                         memcpy(buf, &value, bytes);
1260                 }
1261
1262                 ret += bytes;
1263                 buf = (uint8_t *)buf + bytes;
1264                 pos += bytes;
1265                 len -= bytes;
1266                 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1267                         ++nodes;
1268                         pos = (nodes->start << PAGE_SHIFT);
1269                 }
1270         }
1271
1272         return ret;
1273 }
1274
1275 static struct ttm_bo_driver amdgpu_bo_driver = {
1276         .ttm_tt_create = &amdgpu_ttm_tt_create,
1277         .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1278         .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1279         .invalidate_caches = &amdgpu_invalidate_caches,
1280         .init_mem_type = &amdgpu_init_mem_type,
1281         .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1282         .evict_flags = &amdgpu_evict_flags,
1283         .move = &amdgpu_bo_move,
1284         .verify_access = &amdgpu_verify_access,
1285         .move_notify = &amdgpu_bo_move_notify,
1286         .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1287         .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1288         .io_mem_free = &amdgpu_ttm_io_mem_free,
1289         .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1290         .access_memory = &amdgpu_ttm_access_memory
1291 };
1292
1293 /*
1294  * Firmware Reservation functions
1295  */
1296 /**
1297  * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1298  *
1299  * @adev: amdgpu_device pointer
1300  *
1301  * Free the VRAM reserved for firmware, if it has been reserved.
1302  */
1303 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1304 {
1305         amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
1306                 NULL, &adev->fw_vram_usage.va);
1307 }
1308
1309 /**
1310  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1311  *
1312  * @adev: amdgpu_device pointer
1313  *
1314  * Create a VRAM BO reservation at the location requested by the firmware.
1315  */
1316 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1317 {
1318         struct ttm_operation_ctx ctx = { false, false };
1319         int r = 0;
1320         int i;
1321         u64 vram_size = adev->gmc.visible_vram_size;
1322         u64 offset = adev->fw_vram_usage.start_offset;
1323         u64 size = adev->fw_vram_usage.size;
1324         struct amdgpu_bo *bo;
1325
1326         adev->fw_vram_usage.va = NULL;
1327         adev->fw_vram_usage.reserved_bo = NULL;
1328
1329         if (adev->fw_vram_usage.size > 0 &&
1330                 adev->fw_vram_usage.size <= vram_size) {
1331
1332                 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
1333                                      AMDGPU_GEM_DOMAIN_VRAM,
1334                                      AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1335                                      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1336                                      ttm_bo_type_kernel, NULL,
1337                                      &adev->fw_vram_usage.reserved_bo);
1338                 if (r)
1339                         goto error_create;
1340
1341                 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
1342                 if (r)
1343                         goto error_reserve;
1344
1345                 /* remove the original mem node and create a new one at the
1346                  * requested position
1347                  */
1348                 bo = adev->fw_vram_usage.reserved_bo;
1349                 offset = ALIGN(offset, PAGE_SIZE);
1350                 for (i = 0; i < bo->placement.num_placement; ++i) {
1351                         bo->placements[i].fpfn = offset >> PAGE_SHIFT;
1352                         bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
1353                 }
1354
1355                 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
1356                 r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
1357                                      &bo->tbo.mem, &ctx);
1358                 if (r)
1359                         goto error_pin;
1360
1361                 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
1362                         AMDGPU_GEM_DOMAIN_VRAM,
1363                         adev->fw_vram_usage.start_offset,
1364                         (adev->fw_vram_usage.start_offset +
1365                         adev->fw_vram_usage.size), NULL);
1366                 if (r)
1367                         goto error_pin;
1368                 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
1369                         &adev->fw_vram_usage.va);
1370                 if (r)
1371                         goto error_kmap;
1372
1373                 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1374         }
1375         return r;
1376
1377 error_kmap:
1378         amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
1379 error_pin:
1380         amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1381 error_reserve:
1382         amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
1383 error_create:
1384         adev->fw_vram_usage.va = NULL;
1385         adev->fw_vram_usage.reserved_bo = NULL;
1386         return r;
1387 }
1388
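/**
 * amdgpu_ttm_init - Initialize the memory manager
 *
 * Sets up the TTM device and creates the VRAM, GTT and GDS/GWS/OA
 * heaps, the firmware and stolen-VGA VRAM reservations and the debugfs
 * entries.
 */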
1389 int amdgpu_ttm_init(struct amdgpu_device *adev)
1390 {
1391         uint64_t gtt_size;
1392         int r;
1393         u64 vis_vram_limit;
1394
1395         r = amdgpu_ttm_global_init(adev);
1396         if (r) {
1397                 return r;
1398         }
1399         /* No other users of the address space, so set it to 0 */
1400         r = ttm_bo_device_init(&adev->mman.bdev,
1401                                adev->mman.bo_global_ref.ref.object,
1402                                &amdgpu_bo_driver,
1403                                adev->ddev->anon_inode->i_mapping,
1404                                DRM_FILE_PAGE_OFFSET,
1405                                adev->need_dma32);
1406         if (r) {
1407                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1408                 return r;
1409         }
1410         adev->mman.initialized = true;
1411
1412         /* We opt to avoid OOM on system page allocations */
1413         adev->mman.bdev.no_retry = true;
1414
1415         r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1416                                 adev->gmc.real_vram_size >> PAGE_SHIFT);
1417         if (r) {
1418                 DRM_ERROR("Failed initializing VRAM heap.\n");
1419                 return r;
1420         }
1421
1422         /* Reduce size of CPU-visible VRAM if requested */
1423         vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1424         if (amdgpu_vis_vram_limit > 0 &&
1425             vis_vram_limit <= adev->gmc.visible_vram_size)
1426                 adev->gmc.visible_vram_size = vis_vram_limit;
1427
1428         /* Change the size here instead of the init above so only lpfn is affected */
1429         amdgpu_ttm_set_buffer_funcs_status(adev, false);
1430 #ifdef CONFIG_64BIT
1431         adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1432                                                 adev->gmc.visible_vram_size);
1433 #endif
1434
1435         /*
1436          * The reserved VRAM for firmware must be pinned to the specified
1437          * place in VRAM, so reserve it early.
1438          */
1439         r = amdgpu_ttm_fw_reserve_vram_init(adev);
1440         if (r) {
1441                 return r;
1442         }
1443
1444         if (adev->gmc.stolen_size) {
1445                 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1446                                             AMDGPU_GEM_DOMAIN_VRAM,
1447                                             &adev->stolen_vga_memory,
1448                                             NULL, NULL);
1449                 if (r)
1450                         return r;
1451         }
1452         DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1453                  (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1454
1455         if (amdgpu_gtt_size == -1) {
1456                 struct sysinfo si;
1457
1458                 si_meminfo(&si);
1459                 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1460                                adev->gmc.mc_vram_size),
1461                                ((uint64_t)si.totalram * si.mem_unit * 3/4));
1462         } else {
1463                 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1464         }
1465         r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
1466         if (r) {
1467                 DRM_ERROR("Failed initializing GTT heap.\n");
1468                 return r;
1469         }
1470         DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1471                  (unsigned)(gtt_size / (1024 * 1024)));
1472
1473         adev->gds.mem.total_size <<= AMDGPU_GDS_SHIFT;
1474         adev->gds.mem.gfx_partition_size <<= AMDGPU_GDS_SHIFT;
1475         adev->gds.mem.cs_partition_size <<= AMDGPU_GDS_SHIFT;
1476         adev->gds.gws.total_size <<= AMDGPU_GWS_SHIFT;
1477         adev->gds.gws.gfx_partition_size <<= AMDGPU_GWS_SHIFT;
1478         adev->gds.gws.cs_partition_size <<= AMDGPU_GWS_SHIFT;
1479         adev->gds.oa.total_size <<= AMDGPU_OA_SHIFT;
1480         adev->gds.oa.gfx_partition_size <<= AMDGPU_OA_SHIFT;
1481         adev->gds.oa.cs_partition_size <<= AMDGPU_OA_SHIFT;
1482         /* GDS Memory */
1483         if (adev->gds.mem.total_size) {
1484                 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1485                                    adev->gds.mem.total_size >> PAGE_SHIFT);
1486                 if (r) {
1487                         DRM_ERROR("Failed initializing GDS heap.\n");
1488                         return r;
1489                 }
1490         }
1491
1492         /* GWS */
1493         if (adev->gds.gws.total_size) {
1494                 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1495                                    adev->gds.gws.total_size >> PAGE_SHIFT);
1496                 if (r) {
1497                         DRM_ERROR("Failed initializing GWS heap.\n");
1498                         return r;
1499                 }
1500         }
1501
1502         /* OA */
1503         if (adev->gds.oa.total_size) {
1504                 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1505                                    adev->gds.oa.total_size >> PAGE_SHIFT);
1506                 if (r) {
1507                         DRM_ERROR("Failed initializing OA heap.\n");
1508                         return r;
1509                 }
1510         }
1511
1512         r = amdgpu_ttm_debugfs_init(adev);
1513         if (r) {
1514                 DRM_ERROR("Failed to init debugfs\n");
1515                 return r;
1516         }
1517         return 0;
1518 }
1519
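/**
 * amdgpu_ttm_fini - De-initialize the TTM memory pools.
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down everything amdgpu_ttm_init() set up: the debugfs entries, the
 * stolen VGA and firmware VRAM reservations, the CPU mapping of the visible
 * VRAM aperture and all TTM memory type managers.
 */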
1520 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1521 {
1522         if (!adev->mman.initialized)
1523                 return;
1524
1525         amdgpu_ttm_debugfs_fini(adev);
1526         amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
1527         amdgpu_ttm_fw_reserve_vram_fini(adev);
1528         if (adev->mman.aper_base_kaddr)
1529                 iounmap(adev->mman.aper_base_kaddr);
1530         adev->mman.aper_base_kaddr = NULL;
1531
1532         ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1533         ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
1534         if (adev->gds.mem.total_size)
1535                 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
1536         if (adev->gds.gws.total_size)
1537                 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
1538         if (adev->gds.oa.total_size)
1539                 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
1540         ttm_bo_device_release(&adev->mman.bdev);
1541         amdgpu_ttm_global_fini(adev);
1542         adev->mman.initialized = false;
1543         DRM_INFO("amdgpu: ttm finalized\n");
1544 }
1545
1546 /**
1547  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1548  *
1549  * @adev: amdgpu_device pointer
1550  * @enable: true when we can use buffer functions.
1551  *
1552  * Enable/disable use of buffer functions during suspend/resume. This should
1553  * only be called at bootup or when userspace isn't running.
1554  */
1555 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1556 {
1557         struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
1558         uint64_t size;
1559
1560         if (!adev->mman.initialized || adev->in_gpu_reset)
1561                 return;
1562
1563         /* this just adjusts TTM's idea of the heap size, which sets lpfn to the correct value */
1564         if (enable)
1565                 size = adev->gmc.real_vram_size;
1566         else
1567                 size = adev->gmc.visible_vram_size;
1568         man->size = size >> PAGE_SHIFT;
1569         adev->mman.buffer_funcs_enabled = enable;
1570 }
1571
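/**
 * amdgpu_mmap - mmap callback of the amdgpu DRM file
 *
 * @filp: file pointer of the opened DRM node
 * @vma: user vma describing the requested mapping
 *
 * Offsets below DRM_FILE_PAGE_OFFSET are reserved for other DRM mappings and
 * are rejected with -EINVAL; everything else is handed to ttm_bo_mmap() so
 * TTM can map the backing buffer object into user space.
 */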
1572 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1573 {
1574         struct drm_file *file_priv;
1575         struct amdgpu_device *adev;
1576
1577         if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
1578                 return -EINVAL;
1579
1580         file_priv = filp->private_data;
1581         adev = file_priv->minor->dev->dev_private;
1582         if (adev == NULL)
1583                 return -EINVAL;
1584
1585         return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1586 }
1587
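/*
 * amdgpu_map_buffer - map part of a buffer through a GART transfer window
 *
 * Builds a copy job whose IB writes the GART page table entries for
 * @num_pages pages of @bo (starting at @offset) into the window selected by
 * @window and returns the GPU address of that window in @addr.  This is used
 * by the buffer move path when a source or destination is not directly
 * addressable by the copy engine.
 */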
1588 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
1589                              struct ttm_mem_reg *mem, unsigned num_pages,
1590                              uint64_t offset, unsigned window,
1591                              struct amdgpu_ring *ring,
1592                              uint64_t *addr)
1593 {
1594         struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1595         struct amdgpu_device *adev = ring->adev;
1596         struct ttm_tt *ttm = bo->ttm;
1597         struct amdgpu_job *job;
1598         unsigned num_dw, num_bytes;
1599         dma_addr_t *dma_address;
1600         struct dma_fence *fence;
1601         uint64_t src_addr, dst_addr;
1602         uint64_t flags;
1603         int r;
1604
1605         BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
1606                AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
1607
1608         *addr = adev->gmc.gart_start;
1609         *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
1610                 AMDGPU_GPU_PAGE_SIZE;
1611
1612         num_dw = adev->mman.buffer_funcs->copy_num_dw;
1613         /* align the IB size to a multiple of 8 dwords for padding */
1614         num_dw = ALIGN(num_dw, 8);
1615
1616         num_bytes = num_pages * 8;
1617
1618         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
1619         if (r)
1620                 return r;
1621
1622         src_addr = num_dw * 4;
1623         src_addr += job->ibs[0].gpu_addr;
1624
1625         dst_addr = adev->gart.table_addr;
1626         dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
1627         amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
1628                                 dst_addr, num_bytes);
1629
1630         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1631         WARN_ON(job->ibs[0].length_dw > num_dw);
1632
1633         dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
1634         flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
1635         r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
1636                             &job->ibs[0].ptr[num_dw]);
1637         if (r)
1638                 goto error_free;
1639
1640         r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1641                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
1642         if (r)
1643                 goto error_free;
1644
1645         dma_fence_put(fence);
1646
1647         return r;
1648
1649 error_free:
1650         amdgpu_job_free(job);
1651         return r;
1652 }
1653
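/*
 * amdgpu_copy_buffer - copy @byte_count bytes from one GPU address to another
 * using the ring's buffer_funcs copy engine.
 *
 * The copy is split into chunks of at most copy_max_bytes, optionally
 * synchronized against @resv, and either submitted through the scheduler
 * entity or, with @direct_submit, pushed to the ring immediately.  The
 * resulting fence is returned in @fence.
 *
 * A minimal, hypothetical use from driver code could look like this (the
 * source/destination addresses are placeholders):
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_copy_buffer(ring, src_gpu_addr, dst_gpu_addr, size,
 *			       bo->tbo.resv, &fence, false, false);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */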
1654 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1655                        uint64_t dst_offset, uint32_t byte_count,
1656                        struct reservation_object *resv,
1657                        struct dma_fence **fence, bool direct_submit,
1658                        bool vm_needs_flush)
1659 {
1660         struct amdgpu_device *adev = ring->adev;
1661         struct amdgpu_job *job;
1662
1663         uint32_t max_bytes;
1664         unsigned num_loops, num_dw;
1665         unsigned i;
1666         int r;
1667
1668         if (direct_submit && !ring->ready) {
1669                 DRM_ERROR("Trying to move memory with ring turned off.\n");
1670                 return -EINVAL;
1671         }
1672
1673         max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1674         num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1675         num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
1676
1677         /* align the IB size to a multiple of 8 dwords for padding */
1678         num_dw = ALIGN(num_dw, 8);
1680
1681         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1682         if (r)
1683                 return r;
1684
1685         job->vm_needs_flush = vm_needs_flush;
1686         if (resv) {
1687                 r = amdgpu_sync_resv(adev, &job->sync, resv,
1688                                      AMDGPU_FENCE_OWNER_UNDEFINED,
1689                                      false);
1690                 if (r) {
1691                         DRM_ERROR("sync failed (%d).\n", r);
1692                         goto error_free;
1693                 }
1694         }
1695
1696         for (i = 0; i < num_loops; i++) {
1697                 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1698
1699                 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
1700                                         dst_offset, cur_size_in_bytes);
1701
1702                 src_offset += cur_size_in_bytes;
1703                 dst_offset += cur_size_in_bytes;
1704                 byte_count -= cur_size_in_bytes;
1705         }
1706
1707         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1708         WARN_ON(job->ibs[0].length_dw > num_dw);
1709         if (direct_submit) {
1710                 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
1711                                        NULL, fence);
1712                 job->fence = dma_fence_get(*fence);
1713                 if (r)
1714                         DRM_ERROR("Error scheduling IBs (%d)\n", r);
1715                 amdgpu_job_free(job);
1716         } else {
1717                 r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1718                                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1719                 if (r)
1720                         goto error_free;
1721         }
1722
1723         return r;
1724
1725 error_free:
1726         amdgpu_job_free(job);
1727         return r;
1728 }
1729
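/*
 * amdgpu_fill_buffer - fill an entire buffer object with @src_data using the
 * buffer_funcs fill engine, walking the drm_mm nodes backing the BO so that
 * non-contiguous VRAM allocations are handled correctly.  The fence of the
 * fill job is returned in @fence.
 *
 * A hypothetical clear-to-zero of a BO could look like:
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_fill_buffer(abo, 0, abo->tbo.resv, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */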
1730 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1731                        uint32_t src_data,
1732                        struct reservation_object *resv,
1733                        struct dma_fence **fence)
1734 {
1735         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1736         uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1737         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1738
1739         struct drm_mm_node *mm_node;
1740         unsigned long num_pages;
1741         unsigned int num_loops, num_dw;
1742
1743         struct amdgpu_job *job;
1744         int r;
1745
1746         if (!adev->mman.buffer_funcs_enabled) {
1747                 DRM_ERROR("Trying to clear memory with ring turned off.\n");
1748                 return -EINVAL;
1749         }
1750
1751         if (bo->tbo.mem.mem_type == TTM_PL_TT) {
1752                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
1753                 if (r)
1754                         return r;
1755         }
1756
1757         num_pages = bo->tbo.num_pages;
1758         mm_node = bo->tbo.mem.mm_node;
1759         num_loops = 0;
1760         while (num_pages) {
1761                 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1762
1763                 num_loops += DIV_ROUND_UP(byte_count, max_bytes);
1764                 num_pages -= mm_node->size;
1765                 ++mm_node;
1766         }
1767         num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1768
1769         /* for IB padding */
1770         num_dw += 64;
1771
1772         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1773         if (r)
1774                 return r;
1775
1776         if (resv) {
1777                 r = amdgpu_sync_resv(adev, &job->sync, resv,
1778                                      AMDGPU_FENCE_OWNER_UNDEFINED, false);
1779                 if (r) {
1780                         DRM_ERROR("sync failed (%d).\n", r);
1781                         goto error_free;
1782                 }
1783         }
1784
1785         num_pages = bo->tbo.num_pages;
1786         mm_node = bo->tbo.mem.mm_node;
1787
1788         while (num_pages) {
1789                 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1790                 uint64_t dst_addr;
1791
1792                 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
1793                 while (byte_count) {
1794                         uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1795
1796                         amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
1797                                                 dst_addr, cur_size_in_bytes);
1798
1799                         dst_addr += cur_size_in_bytes;
1800                         byte_count -= cur_size_in_bytes;
1801                 }
1802
1803                 num_pages -= mm_node->size;
1804                 ++mm_node;
1805         }
1806
1807         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1808         WARN_ON(job->ibs[0].length_dw > num_dw);
1809         r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1810                               AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1811         if (r)
1812                 goto error_free;
1813
1814         return 0;
1815
1816 error_free:
1817         amdgpu_job_free(job);
1818         return r;
1819 }
1820
1821 #if defined(CONFIG_DEBUG_FS)
1822
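/*
 * amdgpu_mm_dump_table - debugfs callback that dumps the state of one TTM
 * memory type manager (VRAM or GTT) through a drm_printer, so the heap
 * layout can be inspected from user space.
 */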
1823 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1824 {
1825         struct drm_info_node *node = (struct drm_info_node *)m->private;
1826         unsigned ttm_pl = *(int *)node->info_ent->data;
1827         struct drm_device *dev = node->minor->dev;
1828         struct amdgpu_device *adev = dev->dev_private;
1829         struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
1830         struct drm_printer p = drm_seq_file_printer(m);
1831
1832         man->func->debug(man, &p);
1833         return 0;
1834 }
1835
1836 static int ttm_pl_vram = TTM_PL_VRAM;
1837 static int ttm_pl_tt = TTM_PL_TT;
1838
1839 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
1840         {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
1841         {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
1842         {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
1843 #ifdef CONFIG_SWIOTLB
1844         {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
1845 #endif
1846 };
1847
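/*
 * amdgpu_ttm_vram_read - debugfs read callback for the amdgpu_vram file.
 *
 * Reads VRAM one dword at a time through the MM_INDEX/MM_DATA mailbox
 * registers; both the size and the offset have to be 4-byte aligned.
 *
 * Assuming the amdgpu device is DRM minor 0, the first 4 KiB of VRAM could
 * for example be dumped with:
 *
 *	dd if=/sys/kernel/debug/dri/0/amdgpu_vram bs=4096 count=1 | xxd
 */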
1848 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
1849                                     size_t size, loff_t *pos)
1850 {
1851         struct amdgpu_device *adev = file_inode(f)->i_private;
1852         ssize_t result = 0;
1853         int r;
1854
1855         if (size & 0x3 || *pos & 0x3)
1856                 return -EINVAL;
1857
1858         if (*pos >= adev->gmc.mc_vram_size)
1859                 return -ENXIO;
1860
1861         while (size) {
1862                 unsigned long flags;
1863                 uint32_t value;
1864
1865                 if (*pos >= adev->gmc.mc_vram_size)
1866                         return result;
1867
1868                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1869                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
1870                 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
1871                 value = RREG32_NO_KIQ(mmMM_DATA);
1872                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1873
1874                 r = put_user(value, (uint32_t *)buf);
1875                 if (r)
1876                         return r;
1877
1878                 result += 4;
1879                 buf += 4;
1880                 *pos += 4;
1881                 size -= 4;
1882         }
1883
1884         return result;
1885 }
1886
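/*
 * amdgpu_ttm_vram_write - debugfs write callback for the amdgpu_vram file.
 *
 * Mirror of amdgpu_ttm_vram_read: writes user data into VRAM one dword at a
 * time through the MM_INDEX/MM_DATA registers, requiring 4-byte aligned size
 * and offset.
 */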
1887 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
1888                                     size_t size, loff_t *pos)
1889 {
1890         struct amdgpu_device *adev = file_inode(f)->i_private;
1891         ssize_t result = 0;
1892         int r;
1893
1894         if (size & 0x3 || *pos & 0x3)
1895                 return -EINVAL;
1896
1897         if (*pos >= adev->gmc.mc_vram_size)
1898                 return -ENXIO;
1899
1900         while (size) {
1901                 unsigned long flags;
1902                 uint32_t value;
1903
1904                 if (*pos >= adev->gmc.mc_vram_size)
1905                         return result;
1906
1907                 r = get_user(value, (uint32_t *)buf);
1908                 if (r)
1909                         return r;
1910
1911                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1912                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
1913                 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
1914                 WREG32_NO_KIQ(mmMM_DATA, value);
1915                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1916
1917                 result += 4;
1918                 buf += 4;
1919                 *pos += 4;
1920                 size -= 4;
1921         }
1922
1923         return result;
1924 }
1925
1926 static const struct file_operations amdgpu_ttm_vram_fops = {
1927         .owner = THIS_MODULE,
1928         .read = amdgpu_ttm_vram_read,
1929         .write = amdgpu_ttm_vram_write,
1930         .llseek = default_llseek,
1931 };
1932
1933 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1934
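/*
 * amdgpu_ttm_gtt_read - debugfs read callback for the amdgpu_gtt file.
 *
 * Walks the pages currently bound in the GART and copies their contents to
 * user space; unbound pages read back as zeros.
 */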
1935 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
1936                                    size_t size, loff_t *pos)
1937 {
1938         struct amdgpu_device *adev = file_inode(f)->i_private;
1939         ssize_t result = 0;
1940         int r;
1941
1942         while (size) {
1943                 loff_t p = *pos / PAGE_SIZE;
1944                 unsigned off = *pos & ~PAGE_MASK;
1945                 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
1946                 struct page *page;
1947                 void *ptr;
1948
1949                 if (p >= adev->gart.num_cpu_pages)
1950                         return result;
1951
1952                 page = adev->gart.pages[p];
1953                 if (page) {
1954                         ptr = kmap(page);
1955                         ptr += off;
1956
1957                         r = copy_to_user(buf, ptr, cur_size);
1958                         kunmap(adev->gart.pages[p]);
1959                 } else
1960                         r = clear_user(buf, cur_size);
1961
1962                 if (r)
1963                         return -EFAULT;
1964
1965                 result += cur_size;
1966                 buf += cur_size;
1967                 *pos += cur_size;
1968                 size -= cur_size;
1969         }
1970
1971         return result;
1972 }
1973
1974 static const struct file_operations amdgpu_ttm_gtt_fops = {
1975         .owner = THIS_MODULE,
1976         .read = amdgpu_ttm_gtt_read,
1977         .llseek = default_llseek
1978 };
1979
1980 #endif
1981
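/*
 * amdgpu_iomem_read - debugfs read callback for the amdgpu_iomem file.
 *
 * Treats the file offset as a device (IOVA) address, translates it back to a
 * physical page through the IOMMU domain if one is present, and copies the
 * page contents to user space.  Only pages that belong to this device's TTM
 * mappings may be accessed.
 */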
1982 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
1983                                  size_t size, loff_t *pos)
1984 {
1985         struct amdgpu_device *adev = file_inode(f)->i_private;
1986         struct iommu_domain *dom;
1987         ssize_t result = 0;
1988         int r;
1989
1990         dom = iommu_get_domain_for_dev(adev->dev);
1991
1992         while (size) {
1993                 phys_addr_t addr = *pos & PAGE_MASK;
1994                 loff_t off = *pos & ~PAGE_MASK;
1995                 size_t bytes = PAGE_SIZE - off;
1996                 unsigned long pfn;
1997                 struct page *p;
1998                 void *ptr;
1999
2000                 bytes = min(bytes, size);
2001
2002                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2003
2004                 pfn = addr >> PAGE_SHIFT;
2005                 if (!pfn_valid(pfn))
2006                         return -EPERM;
2007
2008                 p = pfn_to_page(pfn);
2009                 if (p->mapping != adev->mman.bdev.dev_mapping)
2010                         return -EPERM;
2011
2012                 ptr = kmap(p);
2013                 r = copy_to_user(buf, ptr + off, bytes);
2014                 kunmap(p);
2015                 if (r)
2016                         return -EFAULT;
2017
2018                 size -= bytes;
2019                 *pos += bytes;
2020                 result += bytes;
2021         }
2022
2023         return result;
2024 }
2025
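/*
 * amdgpu_iomem_write - debugfs write callback for the amdgpu_iomem file.
 *
 * Same address translation as amdgpu_iomem_read, but copies user data into
 * the resolved pages instead.
 */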
2026 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2027                                  size_t size, loff_t *pos)
2028 {
2029         struct amdgpu_device *adev = file_inode(f)->i_private;
2030         struct iommu_domain *dom;
2031         ssize_t result = 0;
2032         int r;
2033
2034         dom = iommu_get_domain_for_dev(adev->dev);
2035
2036         while (size) {
2037                 phys_addr_t addr = *pos & PAGE_MASK;
2038                 loff_t off = *pos & ~PAGE_MASK;
2039                 size_t bytes = PAGE_SIZE - off;
2040                 unsigned long pfn;
2041                 struct page *p;
2042                 void *ptr;
2043
2044                 bytes = min(bytes, size);
2045
2046                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2047
2048                 pfn = addr >> PAGE_SHIFT;
2049                 if (!pfn_valid(pfn))
2050                         return -EPERM;
2051
2052                 p = pfn_to_page(pfn);
2053                 if (p->mapping != adev->mman.bdev.dev_mapping)
2054                         return -EPERM;
2055
2056                 ptr = kmap(p);
2057                 r = copy_from_user(ptr + off, buf, bytes);
2058                 kunmap(p);
2059                 if (r)
2060                         return -EFAULT;
2061
2062                 size -= bytes;
2063                 *pos += bytes;
2064                 result += bytes;
2065         }
2066
2067         return result;
2068 }
2069
2070 static const struct file_operations amdgpu_ttm_iomem_fops = {
2071         .owner = THIS_MODULE,
2072         .read = amdgpu_iomem_read,
2073         .write = amdgpu_iomem_write,
2074         .llseek = default_llseek
2075 };
2076
2077 static const struct {
2078         char *name;
2079         const struct file_operations *fops;
2080         int domain;
2081 } ttm_debugfs_entries[] = {
2082         { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2083 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2084         { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2085 #endif
2086         { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2087 };
2088
2089 #endif
2090
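/*
 * amdgpu_ttm_debugfs_init - register the TTM debugfs files (amdgpu_vram,
 * amdgpu_iomem and, when enabled, amdgpu_gtt) plus the memory manager dump
 * tables under the DRM minor's debugfs directory.
 */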
2091 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2092 {
2093 #if defined(CONFIG_DEBUG_FS)
2094         unsigned count;
2095
2096         struct drm_minor *minor = adev->ddev->primary;
2097         struct dentry *ent, *root = minor->debugfs_root;
2098
2099         for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2100                 ent = debugfs_create_file(
2101                                 ttm_debugfs_entries[count].name,
2102                                 S_IFREG | S_IRUGO, root,
2103                                 adev,
2104                                 ttm_debugfs_entries[count].fops);
2105                 if (IS_ERR(ent))
2106                         return PTR_ERR(ent);
2107                 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2108                         i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2109                 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2110                         i_size_write(ent->d_inode, adev->gmc.gart_size);
2111                 adev->mman.debugfs_entries[count] = ent;
2112         }
2113
2114         count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2115
2116 #ifdef CONFIG_SWIOTLB
2117         if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
2118                 --count;
2119 #endif
2120
2121         return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2122 #else
2123         return 0;
2124 #endif
2125 }
2126
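/*
 * amdgpu_ttm_debugfs_fini - remove the debugfs files created for direct
 * VRAM/GTT/IO memory access again.
 */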
2127 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
2128 {
2129 #if defined(CONFIG_DEBUG_FS)
2130         unsigned i;
2131
2132         for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
2133                 debugfs_remove(adev->mman.debugfs_entries[i]);
2134 #endif
2135 }