/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

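/* TTM destroy callback: tear down an amdgpu BO once its last reference is
 * gone. Drops any kernel mapping, releases the GEM base object and the
 * parent reference, unlinks the BO from the device's shadow list and frees
 * the attached metadata before freeing the BO itself.
 */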
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

        amdgpu_bo_kunmap(bo);

        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
        if (!list_empty(&bo->shadow_list)) {
                mutex_lock(&adev->shadow_list_lock);
                list_del_init(&bo->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }
        kfree(bo->metadata);
        kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &amdgpu_ttm_bo_destroy;
}

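/* Build the TTM placement list for @abo from an AMDGPU_GEM_DOMAIN_* mask,
 * honouring the BO's creation flags (CPU access required, contiguous VRAM,
 * USWC, shadow). Falls back to a cached system placement if no domain bit
 * is set.
 */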
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct ttm_placement *placement = &abo->placement;
        struct ttm_place *places = abo->placements;
        u64 flags = abo->flags;
        u32 c = 0;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;

                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;

                if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
                        places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                if (flags & AMDGPU_GEM_CREATE_SHADOW)
                        places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
                else
                        places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_SYSTEM;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
                c++;
        }

        if (!c) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
                c++;
        }

        placement->num_placement = c;
        placement->placement = places;

        placement->num_busy_placement = c;
        placement->busy_placement = places;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr)
{
        bool free = false;
        int r;

        if (!*bo_ptr) {
                r = amdgpu_bo_create(adev, size, align, true, domain,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                                     NULL, NULL, 0, bo_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
                                r);
                        return r;
                }
                free = true;
        }

        r = amdgpu_bo_reserve(*bo_ptr, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
                goto error_free;
        }

        r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
                        goto error_unreserve;
                }
        }

        return 0;

error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);

error_free:
        if (free)
                amdgpu_bo_unref(bo_ptr);

        return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr)
{
        int r;

        r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
                                      gpu_addr, cpu_addr);

        if (r)
                return r;

        amdgpu_bo_unreserve(*bo_ptr);

        return 0;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: GPU address to reset to 0, may be NULL
 * @cpu_addr: CPU mapping to unmap and reset to NULL, may be NULL
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr)
{
        if (*bo == NULL)
                return;

        if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);

                amdgpu_bo_unpin(*bo);
                amdgpu_bo_unreserve(*bo);
        }
        amdgpu_bo_unref(bo);

        if (gpu_addr)
                *gpu_addr = 0;

        if (cpu_addr)
                *cpu_addr = NULL;
}

/* Validate that the BO size is smaller than the total memory managed for
 * the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
                                          unsigned long size, u32 domain)
{
        struct ttm_mem_type_manager *man = NULL;

        /*
         * If GTT is part of requested domains the check must succeed to
         * allow fall back to GTT
         */
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                man = &adev->mman.bdev.man[TTM_PL_TT];

                if (size < (man->size << PAGE_SHIFT))
                        return true;
                else
                        goto fail;
        }

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                man = &adev->mman.bdev.man[TTM_PL_VRAM];

                if (size < (man->size << PAGE_SHIFT))
                        return true;
                else
                        goto fail;
        }

        /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
        return true;

fail:
        DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
                  man->size << PAGE_SHIFT);
        return false;
}

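/* Common backend for all BO creation paths: allocates the amdgpu_bo,
 * initializes the underlying TTM object, accounts the bytes moved for the
 * initial placement and optionally clears VRAM allocations when
 * AMDGPU_GEM_CREATE_VRAM_CLEARED is set. The BO is returned unreserved
 * unless the caller passed in its own reservation object.
 */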
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                               unsigned long size, int byte_align,
                               bool kernel, u32 domain, u64 flags,
                               struct sg_table *sg,
                               struct reservation_object *resv,
                               uint64_t init_value,
                               struct amdgpu_bo **bo_ptr)
{
        struct ttm_operation_ctx ctx = { !kernel, false };
        struct amdgpu_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align;
        u64 initial_bytes_moved, bytes_moved;
        size_t acc_size;
        int r;

        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);

        if (!amdgpu_bo_validate_size(adev, size, domain))
                return -ENOMEM;

        if (kernel)
                type = ttm_bo_type_kernel;
        else if (sg)
                type = ttm_bo_type_sg;
        else
                type = ttm_bo_type_device;
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
        bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT |
                                         AMDGPU_GEM_DOMAIN_CPU |
                                         AMDGPU_GEM_DOMAIN_GDS |
                                         AMDGPU_GEM_DOMAIN_GWS |
                                         AMDGPU_GEM_DOMAIN_OA);
        bo->allowed_domains = bo->preferred_domains;
        if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        bo->flags = flags;

#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

        bo->tbo.bdev = &adev->mman.bdev;
        amdgpu_ttm_placement_from_domain(bo, domain);

        initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
        /* Kernel allocations are uninterruptible */
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
                                 &bo->placement, page_align, &ctx, NULL,
                                 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        if (unlikely(r != 0))
                return r;

        bytes_moved = atomic64_read(&adev->num_bytes_moved) -
                      initial_bytes_moved;
        if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
            bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
        else
                amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);

        if (kernel)
                bo->tbo.priority = 1;

        if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
                struct dma_fence *fence;

                r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;

                amdgpu_bo_fence(bo, fence, false);
                dma_fence_put(bo->tbo.moving);
                bo->tbo.moving = dma_fence_get(fence);
                dma_fence_put(fence);
        }
        if (!resv)
                amdgpu_bo_unreserve(bo);
        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
        if (type == ttm_bo_type_device)
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        return 0;

fail_unreserve:
        if (!resv)
                ww_mutex_unlock(&bo->tbo.resv->lock);
        amdgpu_bo_unref(&bo);
        return r;
}

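/* Create the GTT shadow for @bo. The shadow is a same-sized GTT buffer,
 * linked via bo->shadow and the device shadow list, that the backup/restore
 * helpers below copy to and from (e.g. to preserve contents across a GPU
 * reset).
 */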
static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                                   unsigned long size, int byte_align,
                                   struct amdgpu_bo *bo)
{
        int r;

        if (bo->shadow)
                return 0;

        r = amdgpu_bo_do_create(adev, size, byte_align, true,
                                AMDGPU_GEM_DOMAIN_GTT,
                                AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                                AMDGPU_GEM_CREATE_SHADOW,
                                NULL, bo->tbo.resv, 0,
                                &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
                list_add_tail(&bo->shadow_list, &adev->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }

        return r;
}

/* init_value will only take effect when flags contains
 * AMDGPU_GEM_CREATE_VRAM_CLEARED.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
                     struct sg_table *sg,
                     struct reservation_object *resv,
                     uint64_t init_value,
                     struct amdgpu_bo **bo_ptr)
{
        uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
        int r;

        r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
                                parent_flags, sg, resv, init_value, bo_ptr);
        if (r)
                return r;

        if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
                if (!resv)
                        WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
                                                        NULL));

                r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

                if (!resv)
                        reservation_object_unlock((*bo_ptr)->tbo.resv);

                if (r)
                        amdgpu_bo_unref(bo_ptr);
        }

        return r;
}

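/* Copy the current content of @bo into its GTT shadow using @ring, returning
 * the copy fence through @fence. Both the BO and its shadow must have valid
 * GPU offsets when this is called.
 */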
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
                               struct reservation_object *resv,
                               struct dma_fence **fence,
                               bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct, false);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

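/* (Re)validate @bo into its preferred domains, falling back to the full set
 * of allowed domains if the preferred placement runs out of space. Pinned
 * BOs are left where they are.
 */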
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        uint32_t domain;
        int r;

        if (bo->pin_count)
                return 0;

        domain = bo->preferred_domains;

retry:
        amdgpu_ttm_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

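/* Counterpart of amdgpu_bo_backup_to_shadow: copy the shadow's content back
 * into @bo using @ring, returning the copy fence through @fence.
 */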
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
                                  struct reservation_object *resv,
                                  struct dma_fence **fence,
                                  bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct, false);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

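/* Map the whole BO into the kernel address space, waiting for the exclusive
 * fence (e.g. a pending move or clear) to complete first. Returns the
 * mapping through @ptr, reusing an existing mapping if there is one.
 */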
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        void *kptr;
        long r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        kptr = amdgpu_bo_kptr(bo);
        if (kptr) {
                if (ptr)
                        *ptr = kptr;
                return 0;
        }

        r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
                                                MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;

        if (ptr)
                *ptr = amdgpu_bo_kptr(bo);

        return 0;
}

void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
        bool is_iomem;

        return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kmap.bo)
                ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

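/* Pin @bo into @domain, restricted to the [min_offset, max_offset) range
 * (0/0 means no restriction). An already-pinned BO just gets its pin count
 * bumped, provided its current placement satisfies @domain. On first pin
 * the BO is made contiguous, validated in place, bound to GART as needed,
 * and the device pin-size accounting is updated.
 */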
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
        int r, i;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;

        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;

        /* A shared bo cannot be migrated to VRAM */
        if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
                return -EINVAL;

        if (bo->pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;

                if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
                        return -EINVAL;

                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }

        bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        /* force to pin into visible video ram */
        if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
                bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        amdgpu_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                unsigned fpfn, lpfn;

                fpfn = min_offset >> PAGE_SHIFT;
                lpfn = max_offset >> PAGE_SHIFT;

                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
        }

        r = amdgpu_ttm_alloc_gart(&bo->tbo);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p bind failed\n", bo);
                goto error;
        }

        bo->pin_count = 1;
        if (gpu_addr != NULL)
                *gpu_addr = amdgpu_bo_gpu_offset(bo);

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                adev->vram_pin_size += amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        adev->invisible_pin_size += amdgpu_bo_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                adev->gart_pin_size += amdgpu_bo_size(bo);
        }

error:
        return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
        int r, i;

        if (!bo->pin_count) {
                dev_warn(adev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
        }

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                adev->vram_pin_size -= amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        adev->invisible_pin_size -= amdgpu_bo_size(bo);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                adev->gart_pin_size -= amdgpu_bo_size(bo);
        }

error:
        return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (adev->flags & AMD_IS_APU)) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
        "UNKNOWN",
        "GDDR1",
        "DDR2",
        "GDDR3",
        "GDDR4",
        "GDDR5",
        "HBM",
        "DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* reserve PAT memory space to WC for VRAM */
        arch_io_reserve_memtype_wc(adev->mc.aper_base,
                                   adev->mc.aper_size);

        /* Add an MTRR for the VRAM */
        adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                              adev->mc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 adev->mc.mc_vram_size >> 20,
                 (unsigned long long)adev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
                 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
        return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->mc.vram_mtrr);
        arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        if (adev->family <= AMDGPU_FAMILY_CZ &&
            AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        bo->tiling_flags = tiling_flags;
        return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags)
{
        void *buffer;

        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
                        bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(bo->metadata);
        bo->metadata_flags = flags;
        bo->metadata = buffer;
        bo->metadata_size = metadata_size;

        return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        if (!buffer && !metadata_size)
                return -EINVAL;

        if (buffer) {
                if (buffer_size < bo->metadata_size)
                        return -EINVAL;

                if (bo->metadata_size)
                        memcpy(buffer, bo->metadata, bo->metadata_size);
        }

        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (flags)
                *flags = bo->metadata_flags;

        return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;

        abo = ttm_to_amdgpu_bo(bo);
        amdgpu_vm_bo_invalidate(adev, abo, evict);

        amdgpu_bo_kunmap(abo);

        /* remember the eviction */
        if (evict)
                atomic64_inc(&adev->num_evictions);

        /* update statistics */
        if (!new_mem)
                return;

        /* move_notify is called before move happens */
        trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo *abo;
        unsigned long offset, size;
        int r;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return 0;

        abo = ttm_to_amdgpu_bo(bo);

        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= adev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (abo->pin_count > 0)
                return -EINVAL;

        /* hurrah the memory is not visible! */
        atomic64_inc(&adev->num_vram_cpu_page_faults);
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT);

        /* Avoid costly evictions; only set GTT as a busy placement */
        abo->placement.num_busy_placement = 1;
        abo->placement.busy_placement = &abo->placements[1];

        r = ttm_bo_validate(bo, &abo->placement, &ctx);
        if (unlikely(r != 0))
                return r;

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            (offset + size) > adev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, fence);
        else
                reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function; the WARN_ON_ONCE checks below flag misuse for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
                     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

        return bo->tbo.offset;
}