/*
 * linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
 * (blob as of "drm/amdgpu: add AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS flag v3")
 */

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
                                    struct ttm_mem_reg *mem)
{
        if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
                return 0;

        return ((mem->start << PAGE_SHIFT) + mem->size) >
                adev->mc.visible_vram_size ?
                adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
                mem->size;
}
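
/*
 * Worked example (illustrative numbers, not from the original source):
 * with visible_vram_size = 256 MiB, a buffer placed at byte offset
 * 240 MiB with mem->size = 32 MiB straddles the visible boundary, so
 * the function returns 256 MiB - 240 MiB = 16 MiB.  A buffer starting
 * at or beyond 256 MiB contributes 0, and one ending below 256 MiB
 * contributes its full size.
 */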

static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
                                       struct ttm_mem_reg *old_mem,
                                       struct ttm_mem_reg *new_mem)
{
        u64 vis_size;

        if (!adev)
                return;

        if (new_mem) {
                switch (new_mem->mem_type) {
                case TTM_PL_TT:
                        atomic64_add(new_mem->size, &adev->gtt_usage);
                        break;
                case TTM_PL_VRAM:
                        atomic64_add(new_mem->size, &adev->vram_usage);
                        vis_size = amdgpu_get_vis_part_size(adev, new_mem);
                        atomic64_add(vis_size, &adev->vram_vis_usage);
                        break;
                }
        }

        if (old_mem) {
                switch (old_mem->mem_type) {
                case TTM_PL_TT:
                        atomic64_sub(old_mem->size, &adev->gtt_usage);
                        break;
                case TTM_PL_VRAM:
                        atomic64_sub(old_mem->size, &adev->vram_usage);
                        vis_size = amdgpu_get_vis_part_size(adev, old_mem);
                        atomic64_sub(vis_size, &adev->vram_vis_usage);
                        break;
                }
        }
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_bo *bo;

        bo = container_of(tbo, struct amdgpu_bo, tbo);

        amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
        if (!list_empty(&bo->shadow_list)) {
                mutex_lock(&bo->adev->shadow_list_lock);
                list_del_init(&bo->shadow_list);
                mutex_unlock(&bo->adev->shadow_list_lock);
        }
        kfree(bo->metadata);
        kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &amdgpu_ttm_bo_destroy;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                                      struct ttm_placement *placement,
                                      struct ttm_place *places,
                                      u32 domain, u64 flags)
{
        u32 c = 0;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

                if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
                    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
                    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
                        places[c].fpfn = visible_pfn;
                        places[c].lpfn = 0;
                        places[c].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
                                TTM_PL_FLAG_TOPDOWN;
                        c++;
                }

                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_SYSTEM;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
                c++;
        }

        if (!c) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
                c++;
        }

        placement->num_placement = c;
        placement->placement = places;

        placement->num_busy_placement = c;
        placement->busy_placement = places;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
        amdgpu_ttm_placement_init(abo->adev, &abo->placement,
                                  abo->placements, domain, abo->flags);
}
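
/*
 * Example (illustrative sketch of the function above): requesting
 * AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT with no creation
 * flags yields two entries that TTM tries in order:
 *
 *      places[0].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 *                        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
 *      places[1].flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED;
 *
 * so VRAM is preferred and cached GTT is the fallback.  Passing
 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED would instead clamp the VRAM
 * entry's lpfn to the CPU-visible part of VRAM.
 */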

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
                                        struct ttm_placement *placement)
{
        BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

        memcpy(bo->placements, placement->placement,
               placement->num_placement * sizeof(struct ttm_place));
        bo->placement.num_placement = placement->num_placement;
        bo->placement.num_busy_placement = placement->num_busy_placement;
        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr)
{
        int r;

        r = amdgpu_bo_create(adev, size, align, true, domain,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, bo_ptr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
                return r;
        }

        r = amdgpu_bo_reserve(*bo_ptr, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
                goto error_free;
        }

        r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
                        goto error_unreserve;
                }
        }

        amdgpu_bo_unreserve(*bo_ptr);

        return 0;

error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);

error_free:
        amdgpu_bo_unref(bo_ptr);

        return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: if not NULL, cleared to 0 once the BO is unpinned
 * @cpu_addr: if not NULL, the mapping is torn down and the pointer cleared
 *
 * Unmaps and unpins a BO allocated for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr)
{
        if (*bo == NULL)
                return;

        if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);

                amdgpu_bo_unpin(*bo);
                amdgpu_bo_unreserve(*bo);
        }
        amdgpu_bo_unref(bo);

        if (gpu_addr)
                *gpu_addr = 0;

        if (cpu_addr)
                *cpu_addr = NULL;
}
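
/*
 * Usage sketch (hypothetical caller, names are illustrative only):
 * allocating a small GART-backed scratch buffer and releasing it.
 * amdgpu_bo_create_kernel() leaves the BO pinned and (optionally)
 * kmapped, so the pointer can be used directly until the matching
 * amdgpu_bo_free_kernel():
 *
 *      struct amdgpu_bo *scratch;
 *      u64 scratch_gpu;
 *      void *scratch_cpu;
 *      int r;
 *
 *      r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *                                  AMDGPU_GEM_DOMAIN_GTT, &scratch,
 *                                  &scratch_gpu, &scratch_cpu);
 *      if (r)
 *              return r;
 *      memset(scratch_cpu, 0, PAGE_SIZE);      // BO is pinned and mapped
 *
 *      amdgpu_bo_free_kernel(&scratch, &scratch_gpu, &scratch_cpu);
 */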

int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
                                struct sg_table *sg,
                                struct ttm_placement *placement,
                                struct reservation_object *resv,
                                struct amdgpu_bo **bo_ptr)
{
        struct amdgpu_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align;
        size_t acc_size;
        int r;

        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);

        if (kernel)
                type = ttm_bo_type_kernel;
        else if (sg)
                type = ttm_bo_type_sg;
        else
                type = ttm_bo_type_device;

        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->adev = adev;
        INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
        bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT |
                                         AMDGPU_GEM_DOMAIN_CPU |
                                         AMDGPU_GEM_DOMAIN_GDS |
                                         AMDGPU_GEM_DOMAIN_GWS |
                                         AMDGPU_GEM_DOMAIN_OA);
        bo->allowed_domains = bo->prefered_domains;
        if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        bo->flags = flags;

        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocations are uninterruptible */
        r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        if (unlikely(r != 0))
                return r;

        if ((flags & AMDGPU_GEM_CREATE_VRAM_CLEARED) &&
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
                struct fence *fence;

                if (adev->mman.buffer_funcs_ring == NULL ||
                    !adev->mman.buffer_funcs_ring->ready) {
                        r = -EBUSY;
                        goto fail_free;
                }

                r = amdgpu_bo_reserve(bo, false);
                if (unlikely(r != 0))
                        goto fail_free;

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
                if (unlikely(r != 0))
                        goto fail_unreserve;

                r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;

                amdgpu_bo_fence(bo, fence, false);
                amdgpu_bo_unreserve(bo);
                fence_put(bo->tbo.moving);
                bo->tbo.moving = fence_get(fence);
                fence_put(fence);
        }
        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        return 0;

fail_unreserve:
        amdgpu_bo_unreserve(bo);
fail_free:
        amdgpu_bo_unref(&bo);
        return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                                   unsigned long size, int byte_align,
                                   struct amdgpu_bo *bo)
{
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        int r;

        if (bo->shadow)
                return 0;

        bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
        memset(&placements, 0,
               (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

        amdgpu_ttm_placement_init(adev, &placement,
                                  placements, AMDGPU_GEM_DOMAIN_GTT,
                                  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

        r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
                                        AMDGPU_GEM_DOMAIN_GTT,
                                        AMDGPU_GEM_CREATE_CPU_GTT_USWC,
                                        NULL, &placement,
                                        bo->tbo.resv,
                                        &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
                list_add_tail(&bo->shadow_list, &adev->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }

        return r;
}

int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
                     struct sg_table *sg,
                     struct reservation_object *resv,
                     struct amdgpu_bo **bo_ptr)
{
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        int r;

        memset(&placements, 0,
               (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

        amdgpu_ttm_placement_init(adev, &placement,
                                  placements, domain, flags);

        r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
                                        domain, flags, sg, &placement,
                                        resv, bo_ptr);
        if (r)
                return r;

        if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
                r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
                if (r)
                        amdgpu_bo_unref(bo_ptr);
        }

        return r;
}
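
/*
 * Example (illustrative): a caller that wants a GTT shadow copy of a
 * VRAM allocation combines the flags handled above; when
 * amdgpu_need_backup() is true this also allocates bo->shadow in GTT,
 * sharing the parent's reservation object:
 *
 *      r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 *                           AMDGPU_GEM_DOMAIN_VRAM,
 *                           AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 *                           AMDGPU_GEM_CREATE_SHADOW,
 *                           NULL, NULL, &bo);
 */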

int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
                               struct reservation_object *resv,
                               struct fence **fence,
                               bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
                                  struct reservation_object *resv,
                                  struct fence **fence,
                                  bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}
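
/*
 * Illustrative note: the two helpers above are symmetric.  A
 * hypothetical GPU-reset path would call amdgpu_bo_backup_to_shadow()
 * while the VRAM contents are still valid and, after reset, restore
 * them again:
 *
 *      struct fence *fence = NULL;
 *
 *      r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
 *                                        bo->tbo.resv, &fence, false);
 *      if (!r)
 *              fence_wait(fence, false);  // assumption: caller may block
 *      fence_put(fence);
 */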

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        bool is_iomem;
        long r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        if (bo->kptr) {
                if (ptr)
                        *ptr = bo->kptr;
                return 0;
        }

        r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
                                                MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;

        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr)
                *ptr = bo->kptr;

        return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        ttm_bo_kunmap(&bo->kmap);
}
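
/*
 * Usage sketch (illustrative; data and len are hypothetical): CPU
 * access to a BO's contents.  The BO should be reserved around the
 * mapping, mirroring what amdgpu_bo_create_kernel() does above, and
 * BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS cannot be mapped:
 *
 *      void *ptr;
 *
 *      r = amdgpu_bo_reserve(bo, false);
 *      if (r)
 *              return r;
 *      r = amdgpu_bo_kmap(bo, &ptr);
 *      if (!r) {
 *              memcpy(ptr, data, len);  // hypothetical payload
 *              amdgpu_bo_kunmap(bo);
 *      }
 *      amdgpu_bo_unreserve(bo);
 */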

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;
        unsigned fpfn, lpfn;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;

        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;

        if (bo->pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;

                if (domain != amdgpu_mem_type_to_domain(mem_type))
                        return -EINVAL;

                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;

                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }

        bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        amdgpu_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force pinning into visible VRAM */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset >
                     bo->adev->mc.visible_vram_size)) {
                        if (WARN_ON_ONCE(min_offset >
                                         bo->adev->mc.visible_vram_size))
                                return -EINVAL;
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
                } else {
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = max_offset >> PAGE_SHIFT;
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
                goto error;
        }
        r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
        if (unlikely(r)) {
                dev_err(bo->adev->dev, "%p bind failed\n", bo);
                goto error;
        }

        bo->pin_count = 1;
        if (gpu_addr != NULL)
                *gpu_addr = amdgpu_bo_gpu_offset(bo);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                bo->adev->vram_pin_size += amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                bo->adev->gart_pin_size += amdgpu_bo_size(bo);
        }

error:
        return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}
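
/*
 * Example (illustrative): amdgpu_bo_pin() covers the common case; the
 * restricted variant lets a caller bound the placement window, e.g. to
 * keep a scanout buffer inside CPU-visible VRAM:
 *
 *      r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
 *                                   0, adev->mc.visible_vram_size,
 *                                   &gpu_addr);
 */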

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
        }

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
        }

error:
        return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (adev->flags & AMD_IS_APU)) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
        "UNKNOWN",
        "GDDR1",
        "DDR2",
        "GDDR3",
        "GDDR4",
        "GDDR5",
        "HBM",
        "DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* Add an MTRR for the VRAM */
        adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                              adev->mc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 adev->mc.mc_vram_size >> 20,
                 (unsigned long long)adev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
                 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
        return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        bo->tiling_flags = tiling_flags;
        return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags)
{
        void *buffer;

        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
                        bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(bo->metadata);
        bo->metadata_flags = flags;
        bo->metadata = buffer;
        bo->metadata_size = metadata_size;

        return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        if (!buffer && !metadata_size)
                return -EINVAL;

        if (buffer) {
                if (buffer_size < bo->metadata_size)
                        return -EINVAL;

                if (bo->metadata_size)
                        memcpy(buffer, bo->metadata, bo->metadata_size);
        }

        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (flags)
                *flags = bo->metadata_flags;

        return 0;
}
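
/*
 * Example (illustrative; struct hypothetical_meta is made up): a
 * userspace-facing path could stash opaque format data with the BO and
 * read it back later:
 *
 *      struct hypothetical_meta meta;
 *      uint32_t size;
 *      uint64_t flags;
 *
 *      r = amdgpu_bo_set_metadata(bo, &meta, sizeof(meta), 0);
 *      ...
 *      r = amdgpu_bo_get_metadata(bo, &meta, sizeof(meta), &size, &flags);
 */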

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;

        abo = container_of(bo, struct amdgpu_bo, tbo);
        amdgpu_vm_bo_invalidate(abo->adev, abo);

        /* update statistics */
        if (!new_mem)
                return;

        /* move_notify is called before move happens */
        amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);

        trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev;
        struct amdgpu_bo *abo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return 0;

        abo = container_of(bo, struct amdgpu_bo, tbo);
        adev = abo->adev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        /* TODO: figure out how to map scattered VRAM to the CPU */
        if ((offset + size) <= adev->mc.visible_vram_size &&
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (abo->pin_count > 0)
                return -EINVAL;

        /* hurrah, the memory is not visible! */
        abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
        lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < abo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!abo->placements[i].lpfn ||
                     abo->placements[i].lpfn > lpfn))
                        abo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &abo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &abo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > adev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, fence);
        else
                reservation_object_add_excl_fence(resv, fence);
}
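
/*
 * Usage sketch (illustrative): a writer attaches its fence exclusively
 * so later waiters (e.g. the wait in amdgpu_bo_kmap() above) block
 * until the GPU write finishes, while a read-only user would pass
 * shared = true, as amdgpu_bo_backup_to_shadow() does:
 *
 *      amdgpu_bo_fence(bo, fence, false);
 */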

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns the current GPU offset of the object.
 *
 * Note: the object should be either pinned or reserved when calling
 * this function; the WARN_ON_ONCE checks below exist to catch misuse
 * during debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
                     !amdgpu_ttm_is_bound(bo->tbo.ttm));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

        return bo->tbo.offset;
}