linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drm/amdgpu: implement the allocation range (v3)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"


int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

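/**
 * amdgpu_get_vis_part_size - how much of a placement is CPU visible
 *
 * @adev: amdgpu device the buffer belongs to
 * @mem: TTM memory placement to check
 *
 * Returns how many bytes of @mem fall inside the CPU visible part of
 * VRAM, or 0 if the placement starts beyond it.
 */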
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;
	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			   adev->mc.visible_vram_size ?
			   adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			   mem->size;
	}
	return ret;
}

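/**
 * amdgpu_update_memory_usage - track GTT/VRAM usage statistics
 *
 * @adev: amdgpu device
 * @old_mem: placement being vacated, or NULL
 * @new_mem: placement being occupied, or NULL
 *
 * Adds the size of @new_mem to and subtracts the size of @old_mem from
 * the per-domain usage counters, including the CPU visible VRAM counter.
 */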
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
		       struct ttm_mem_reg *old_mem,
		       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;
	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

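/**
 * amdgpu_ttm_bo_destroy - TTM destroy callback for amdgpu BOs
 *
 * @tbo: TTM buffer object being destroyed
 *
 * Drops the usage statistics, unregisters the MMU notifier, removes the
 * BO from the device GEM list, releases the GEM object and frees the
 * amdgpu_bo structure.
 */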
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
	amdgpu_mn_unregister(bo);

	mutex_lock(&bo->adev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->adev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo->metadata);
	kfree(bo);
}

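/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a TTM BO is an amdgpu BO
 *
 * @bo: TTM buffer object to check
 *
 * Returns true if the object was created by this driver, detected by
 * comparing the destroy callback.
 */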
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

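/**
 * amdgpu_ttm_placement_init - build a TTM placement list
 *
 * @adev: amdgpu device
 * @placement: placement structure to fill in
 * @placements: array that receives the individual placements
 * @domain: allowed AMDGPU_GEM_DOMAIN_* domains
 * @flags: AMDGPU_GEM_CREATE_* flags
 *
 * Translates the requested domains and creation flags into TTM placements.
 * Buffers created with NO_CPU_ACCESS prefer the invisible part of VRAM,
 * while buffers requiring CPU access are limited to the visible part.
 */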
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
			adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			(placements[i].flags & TTM_PL_FLAG_VRAM) &&
			!placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

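/**
 * amdgpu_ttm_placement_from_domain - refresh a BO's placement list
 *
 * @rbo: BO to update
 * @domain: allowed AMDGPU_GEM_DOMAIN_* domains
 *
 * Rebuilds @rbo's placement list for the given domains using the flags
 * the BO was created with.
 */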
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

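/**
 * amdgpu_bo_create_restricted - create a BO with a caller supplied placement
 *
 * @adev: amdgpu device
 * @size: size of the buffer in bytes
 * @byte_align: requested alignment in bytes
 * @kernel: true if the BO is for kernel internal use
 * @domain: allowed AMDGPU_GEM_DOMAIN_* domains
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table for imported buffers
 * @placement: placement list to use for the new BO
 * @bo_ptr: where the new BO is stored on success
 *
 * Allocates the amdgpu_bo, initializes the GEM object and hands the buffer
 * to TTM with the given placement.  Returns 0 on success or a negative
 * error code.
 */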
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
	 * Do this as a temporary workaround.
	 */
	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		if (adev->asic_type >= CHIP_TOPAZ) {
			if (byte_align & 0x7fff)
				byte_align = ALIGN(byte_align, 0x8000);
			if (size & 0x7fff)
				size = ALIGN(size, 0x8000);
		}
	}

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT |
				       AMDGPU_GEM_DOMAIN_CPU |
				       AMDGPU_GEM_DOMAIN_GDS |
				       AMDGPU_GEM_DOMAIN_GWS |
				       AMDGPU_GEM_DOMAIN_OA);

	bo->flags = flags;
	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	down_read(&adev->pm.mclk_lock);
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
	up_read(&adev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}

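/**
 * amdgpu_bo_create - create an amdgpu BO with a default placement
 *
 * @adev: amdgpu device
 * @size: size of the buffer in bytes
 * @byte_align: requested alignment in bytes
 * @kernel: true if the BO is for kernel internal use
 * @domain: allowed AMDGPU_GEM_DOMAIN_* domains
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table for imported buffers
 * @bo_ptr: where the new BO is stored on success
 *
 * Convenience wrapper around amdgpu_bo_create_restricted() that builds the
 * placement list from @domain and @flags.
 *
 * Typical usage looks roughly like this (illustrative values only):
 *
 *	struct amdgpu_bo *bo;
 *	int r = amdgpu_bo_create(adev, 4096, PAGE_SIZE, true,
 *				 AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
 *	if (!r) {
 *		... use bo ...
 *		amdgpu_bo_unref(&bo);
 *	}
 */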
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg, struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align,
					   kernel, domain, flags,
					   sg,
					   &placement,
					   bo_ptr);
}

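/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map
 * @ptr: optional location that receives the kernel virtual address
 *
 * Maps the whole buffer with ttm_bo_kmap() and caches the pointer in
 * bo->kptr so repeated calls are cheap.  Fails with -EPERM for BOs
 * created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS.
 */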
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

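/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: BO to pin
 * @domain: domain to pin the BO into
 * @min_offset: minimum GPU offset of the pinned placement
 * @max_offset: maximum GPU offset of the pinned placement, 0 for no limit
 * @gpu_addr: optional location that receives the pinned GPU address
 *
 * Validates the BO into @domain with TTM_PL_FLAG_NO_EVICT set so it can no
 * longer be evicted.  If the BO is already pinned only the pin count is
 * increased.  Userptr BOs cannot be pinned.
 */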
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;
			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (lpfn && lpfn < bo->placements[i].lpfn)
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

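/**
 * amdgpu_bo_unpin - decrease the pin count of a BO
 *
 * @bo: BO to unpin
 *
 * Once the pin count drops to zero the eviction restriction is removed
 * and the BO is revalidated so TTM may move it again.
 */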
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

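/**
 * amdgpu_bo_evict_vram - evict all BOs from VRAM
 *
 * @adev: amdgpu device
 *
 * Evicts everything managed by the VRAM memory manager, for example
 * before entering suspend.
 */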
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMDGPU_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo, *n;

	if (list_empty(&adev->gem.objects)) {
		return;
	}
	dev_err(adev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
		mutex_lock(&adev->ddev->struct_mutex);
		dev_err(adev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->adev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->adev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&adev->ddev->struct_mutex);
	}
}

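/**
 * amdgpu_bo_init - initialize the buffer object handling
 *
 * @adev: amdgpu device
 *
 * Sets up the write-combined MTRR for the VRAM aperture, prints the
 * detected VRAM configuration and initializes TTM.
 */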
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		adev->mc.mc_vram_size >> 20,
		(unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits DDR\n",
			adev->mc.vram_width);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

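/**
 * amdgpu_bo_set_metadata - attach opaque metadata to a BO
 *
 * @bo: BO to attach the metadata to
 * @metadata: metadata buffer supplied by the caller
 * @metadata_size: size of the metadata in bytes, 0 clears the metadata
 * @flags: metadata flags stored alongside the data
 *
 * The metadata is copied into a kernel allocation, replacing any
 * previously stored metadata.
 */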
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kzalloc(metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	memcpy(buffer, metadata, metadata_size);

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

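/**
 * amdgpu_bo_move_notify - TTM move notification callback
 *
 * @bo: TTM BO that is about to move
 * @new_mem: placement the BO is moving to, or NULL
 *
 * Invalidates the VM mappings of the BO and updates the memory usage
 * statistics; called by TTM before the move actually happens.
 */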
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

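/**
 * amdgpu_bo_fault_reserve_notify - handle a CPU fault on invisible VRAM
 *
 * @bo: TTM BO that faulted
 *
 * If the faulting BO currently lives outside the CPU visible part of
 * VRAM it is revalidated into the visible window, falling back to GTT
 * when visible VRAM is exhausted.
 */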
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}