drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
		return 0;

	return ((mem->start << PAGE_SHIFT) + mem->size) >
		adev->mc.visible_vram_size ?
		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
		mem->size;
}

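/*
 * Worked example (editor's note, not part of the original source): with
 * visible_vram_size = 256 MiB, a 32 MiB BO whose start offset
 * (mem->start << PAGE_SHIFT) is 240 MiB straddles the visible boundary:
 * 240 MiB + 32 MiB > 256 MiB, so the helper reports 256 - 240 = 16 MiB
 * as CPU-visible. A BO starting at or beyond 256 MiB reports 0.
 */
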
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_bo_kunmap(bo);
	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *places,
				      u32 domain, u64 flags)
{
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);

	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
				  domain, abo->flags);
}

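/*
 * Sketch (editor's note; the flag and domain combination are assumptions):
 * requesting VRAM | GTT produces two placement entries, tried in order, so
 * TTM can fall back to GTT when VRAM is exhausted:
 *
 *	abo->flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					      AMDGPU_GEM_DOMAIN_GTT);
 *
 * places[0] then carries TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC |
 * TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TOPDOWN and places[1] carries
 * TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED.
 */
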
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: optional GPU address of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, bo_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: optional pointer to the pinned GPU address, cleared to 0
 * @cpu_addr: optional pointer to the CPU mapping, cleared to NULL
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

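/*
 * Usage sketch (editor's note, not part of the original source): a minimal
 * allocate/use/release cycle with the two helpers above; the page size,
 * GTT placement and zero-fill are assumptions.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	memset(cpu_ptr, 0, PAGE_SIZE);
 *
 * and once the hardware no longer uses gpu_addr:
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
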
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	u64 initial_bytes_moved, bytes_moved;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, !kernel, NULL,
				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
		      initial_bytes_moved;
	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);

	if (unlikely(r != 0))
		return r;

	if (kernel)
		bo->tbo.priority = 1;

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	if (bo->shadow)
		return 0;

	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, AMDGPU_GEM_DOMAIN_GTT,
				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
					AMDGPU_GEM_DOMAIN_GTT,
					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
					NULL, &placement,
					bo->tbo.resv,
					&bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					domain, flags, sg, &placement,
					resv, bo_ptr);
	if (r)
		return r;

	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
		if (!resv) {
			r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
			WARN_ON(r != 0);
		}

		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

		if (!resv)
			ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}

int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->prefered_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

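/*
 * Usage sketch (editor's note): CPU access through the kernel mapping; the
 * write shown is an assumption. The BO is reserved around the access, and
 * amdgpu_bo_kmap() waits for pending fences before returning a pointer:
 *
 *	u32 *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, (void **)&ptr);
 *	if (!r) {
 *		ptr[0] = 0xdeadbeef;
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */
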
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (domain != amdgpu_mem_type_to_domain(mem_type))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset >
		     adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL) {
		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
		if (unlikely(r)) {
			dev_err(adev->dev, "%p bind failed\n", bo);
			goto error;
		}
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	}
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}

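/*
 * Usage sketch (editor's note; the visible-VRAM restriction is an
 * assumption): pinning a reserved BO into CPU-visible VRAM and releasing
 * it again:
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM, 0,
 *				     adev->mc.visible_vram_size, &gpu_addr);
 *
 * then, once the buffer no longer needs to stay resident:
 *
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */
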
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->mc.aper_base,
				   adev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
	arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(adev, abo);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah, the memory is not visible! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

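/*
 * Usage sketch (editor's note; the fence source is an assumption): after
 * scheduling a copy that produced a fence, attach it so later waits on the
 * reservation object see the pending GPU work. The reservation object takes
 * its own reference, so the local one is dropped afterwards, matching the
 * pattern used in amdgpu_bo_create_restricted() above:
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(ring, src_addr, dst_addr, size,
 *			       bo->tbo.resv, &fence, false, false);
 *	if (!r) {
 *		amdgpu_bo_fence(bo, fence, true);
 *		dma_fence_put(fence);
 *	}
 */
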
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns the current GPU offset of the object.
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}