// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>

#include "lsdc_drv.h"
#include "lsdc_ttm.h"

const char *lsdc_mem_type_to_str(uint32_t mem_type)
{
        switch (mem_type) {
        case TTM_PL_VRAM:
                return "VRAM";
        case TTM_PL_TT:
                return "GTT";
        case TTM_PL_SYSTEM:
                return "SYSTEM";
        default:
                break;
        }

        return "Unknown";
}

const char *lsdc_domain_to_str(u32 domain)
{
        switch (domain) {
        case LSDC_GEM_DOMAIN_VRAM:
                return "VRAM";
        case LSDC_GEM_DOMAIN_GTT:
                return "GTT";
        case LSDC_GEM_DOMAIN_SYSTEM:
                return "SYSTEM";
        default:
                break;
        }

        return "Unknown";
}

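/*
 * Build the TTM placement list for @lbo from the requested GEM domain mask.
 * Objects no larger than one page request top-down allocation; if no known
 * domain bit is set, fall back to a single SYSTEM placement.
 */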
static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
{
        u32 c = 0;
        u32 pflags = 0;
        u32 i;

        if (lbo->tbo.base.size <= PAGE_SIZE)
                pflags |= TTM_PL_FLAG_TOPDOWN;

        lbo->placement.placement = lbo->placements;

        if (domain & LSDC_GEM_DOMAIN_VRAM) {
                lbo->placements[c].mem_type = TTM_PL_VRAM;
                lbo->placements[c++].flags = pflags;
        }

        if (domain & LSDC_GEM_DOMAIN_GTT) {
                lbo->placements[c].mem_type = TTM_PL_TT;
                lbo->placements[c++].flags = pflags;
        }

        if (domain & LSDC_GEM_DOMAIN_SYSTEM) {
                lbo->placements[c].mem_type = TTM_PL_SYSTEM;
                lbo->placements[c++].flags = 0;
        }

        if (!c) {
                lbo->placements[c].mem_type = TTM_PL_SYSTEM;
                lbo->placements[c++].flags = 0;
        }

        lbo->placement.num_placement = c;

        for (i = 0; i < c; ++i) {
                lbo->placements[i].fpfn = 0;
                lbo->placements[i].lpfn = 0;
        }
}

static void lsdc_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

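/* Allocate and initialize a cached, scatter/gather capable ttm_tt */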
static struct ttm_tt *
lsdc_ttm_tt_create(struct ttm_buffer_object *tbo, uint32_t page_flags)
{
        struct ttm_tt *tt;
        int ret;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
        if (ret < 0) {
                kfree(tt);
                return NULL;
        }

        return tt;
}

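/*
 * For buffers imported through PRIME (TTM_TT_FLAG_EXTERNAL), the backing
 * pages are provided by the exporter and only the DMA address array needs
 * to be filled in; otherwise, allocate pages from the TTM pool.
 */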
static int lsdc_ttm_tt_populate(struct ttm_device *bdev,
                                struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

        if (slave && ttm->sg) {
                drm_prime_sg_to_dma_addr_array(ttm->sg,
                                               ttm->dma_address,
                                               ttm->num_pages);

                return 0;
        }

        return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void lsdc_ttm_tt_unpopulate(struct ttm_device *bdev,
                                   struct ttm_tt *ttm)
{
        bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

        if (slave)
                return;

        return ttm_pool_free(&bdev->pool, ttm);
}

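/* Eviction policy: VRAM buffers are evicted to GTT, everything else to SYSTEM */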
static void lsdc_bo_evict_flags(struct ttm_buffer_object *tbo,
                                struct ttm_placement *tplacement)
{
        struct ttm_resource *resource = tbo->resource;
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);

        switch (resource->mem_type) {
        case TTM_PL_VRAM:
                lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_SYSTEM);
                break;
        }

        *tplacement = lbo->placement;
}

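/*
 * Move a buffer between placements.  Moves that only change bookkeeping
 * (fresh BOs without backing store, SYSTEM <-> GTT) become null moves;
 * everything else falls back to a CPU memcpy.  Pinned BOs cannot be moved.
 */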
static int lsdc_bo_move(struct ttm_buffer_object *tbo,
                        bool evict,
                        struct ttm_operation_ctx *ctx,
                        struct ttm_resource *new_mem,
                        struct ttm_place *hop)
{
        struct drm_device *ddev = tbo->base.dev;
        struct ttm_resource *old_mem = tbo->resource;
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);
        int ret;

        if (unlikely(tbo->pin_count > 0)) {
                drm_warn(ddev, "Can't move a pinned BO\n");
                return -EINVAL;
        }

        ret = ttm_bo_wait_ctx(tbo, ctx);
        if (ret)
                return ret;

        if (!old_mem) {
                drm_dbg(ddev, "bo[%p] move: NULL to %s, size: %zu\n",
                        lbo, lsdc_mem_type_to_str(new_mem->mem_type),
                        lsdc_bo_size(lbo));
                ttm_bo_move_null(tbo, new_mem);
                return 0;
        }

        if (old_mem->mem_type == TTM_PL_SYSTEM && !tbo->ttm) {
                ttm_bo_move_null(tbo, new_mem);
                drm_dbg(ddev, "bo[%p] move: SYSTEM to NULL, size: %zu\n",
                        lbo, lsdc_bo_size(lbo));
                return 0;
        }

        if (old_mem->mem_type == TTM_PL_SYSTEM &&
            new_mem->mem_type == TTM_PL_TT) {
                drm_dbg(ddev, "bo[%p] move: SYSTEM to GTT, size: %zu\n",
                        lbo, lsdc_bo_size(lbo));
                ttm_bo_move_null(tbo, new_mem);
                return 0;
        }

        if (old_mem->mem_type == TTM_PL_TT &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                drm_dbg(ddev, "bo[%p] move: GTT to SYSTEM, size: %zu\n",
                        lbo, lsdc_bo_size(lbo));
                ttm_resource_free(tbo, &tbo->resource);
                ttm_bo_assign_mem(tbo, new_mem);
                return 0;
        }

        drm_dbg(ddev, "bo[%p] move: %s to %s, size: %zu\n",
                lbo,
                lsdc_mem_type_to_str(old_mem->mem_type),
                lsdc_mem_type_to_str(new_mem->mem_type),
                lsdc_bo_size(lbo));

        return ttm_bo_move_memcpy(tbo, ctx, new_mem);
}

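/*
 * Tell TTM how the CPU reaches a resource: VRAM is accessed through the
 * aperture at ldev->vram_base and mapped write-combined, while SYSTEM and
 * GTT resources are plain system memory.
 */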
static int lsdc_bo_reserve_io_mem(struct ttm_device *bdev,
                                  struct ttm_resource *mem)
{
        struct lsdc_device *ldev = tdev_to_ldev(bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                break;
        case TTM_PL_TT:
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = (mem->start << PAGE_SHIFT) + ldev->vram_base;
                mem->bus.is_iomem = true;
                mem->bus.caching = ttm_write_combined;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct ttm_device_funcs lsdc_bo_driver = {
        .ttm_tt_create = lsdc_ttm_tt_create,
        .ttm_tt_populate = lsdc_ttm_tt_populate,
        .ttm_tt_unpopulate = lsdc_ttm_tt_unpopulate,
        .ttm_tt_destroy = lsdc_ttm_tt_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = lsdc_bo_evict_flags,
        .move = lsdc_bo_move,
        .io_mem_reserve = lsdc_bo_reserve_io_mem,
};

u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct drm_device *ddev = tbo->base.dev;
        struct ttm_resource *resource = tbo->resource;

        if (unlikely(!tbo->pin_count)) {
                drm_err(ddev, "unpinned bo, gpu virtual address is invalid\n");
                return 0;
        }

        if (unlikely(resource->mem_type == TTM_PL_SYSTEM))
                return 0;

        return resource->start << PAGE_SHIFT;
}

size_t lsdc_bo_size(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;

        return tbo->base.size;
}

int lsdc_bo_reserve(struct lsdc_bo *lbo)
{
        return ttm_bo_reserve(&lbo->tbo, true, false, NULL);
}

void lsdc_bo_unreserve(struct lsdc_bo *lbo)
{
        return ttm_bo_unreserve(&lbo->tbo);
}

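/*
 * Pin @lbo into @domain (validating it there first when a domain is given)
 * and optionally return its GPU offset.  The caller must hold the BO
 * reservation.  Pinned VRAM and GTT sizes are accounted per device;
 * shared (exported) BOs are refused a VRAM pin.
 */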
int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
        int ret;

        if (tbo->pin_count)
                goto bo_pinned;

        if (lbo->sharing_count && domain == LSDC_GEM_DOMAIN_VRAM)
                return -EINVAL;

        if (domain)
                lsdc_bo_set_placement(lbo, domain);

        ret = ttm_bo_validate(tbo, &lbo->placement, &ctx);
        if (unlikely(ret)) {
                drm_err(&ldev->base, "%p validate failed: %d\n", lbo, ret);
                return ret;
        }

        if (domain == LSDC_GEM_DOMAIN_VRAM)
                ldev->vram_pinned_size += lsdc_bo_size(lbo);
        else if (domain == LSDC_GEM_DOMAIN_GTT)
                ldev->gtt_pinned_size += lsdc_bo_size(lbo);

bo_pinned:
        ttm_bo_pin(tbo);

        if (gpu_addr)
                *gpu_addr = lsdc_bo_gpu_offset(lbo);

        return 0;
}

void lsdc_bo_unpin(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);

        if (unlikely(!tbo->pin_count)) {
                drm_dbg(&ldev->base, "%p unpin is not necessary\n", lbo);
                return;
        }

        ttm_bo_unpin(tbo);

        if (!tbo->pin_count) {
                if (tbo->resource->mem_type == TTM_PL_VRAM)
                        ldev->vram_pinned_size -= lsdc_bo_size(lbo);
                else if (tbo->resource->mem_type == TTM_PL_TT)
                        ldev->gtt_pinned_size -= lsdc_bo_size(lbo);
        }
}

void lsdc_bo_ref(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;

        ttm_bo_get(tbo);
}

void lsdc_bo_unref(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;

        ttm_bo_put(tbo);
}

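/*
 * Map the buffer into kernel address space, waiting for pending kernel
 * fences first.  The mapping is cached in lbo->kptr and reused by later
 * calls until lsdc_bo_kunmap() is called.
 */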
int lsdc_bo_kmap(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct drm_gem_object *gem = &tbo->base;
        struct drm_device *ddev = gem->dev;
        long ret;
        int err;

        ret = dma_resv_wait_timeout(gem->resv, DMA_RESV_USAGE_KERNEL, false,
                                    MAX_SCHEDULE_TIMEOUT);
        if (ret < 0) {
                drm_warn(ddev, "wait fence timeout\n");
                return ret;
        }

        if (lbo->kptr)
                return 0;

        err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
        if (err) {
                drm_err(ddev, "kmap %p failed: %d\n", lbo, err);
                return err;
        }

        lbo->kptr = ttm_kmap_obj_virtual(&lbo->kmap, &lbo->is_iomem);

        return 0;
}

void lsdc_bo_kunmap(struct lsdc_bo *lbo)
{
        if (!lbo->kptr)
                return;

        lbo->kptr = NULL;
        ttm_bo_kunmap(&lbo->kmap);
}

void lsdc_bo_clear(struct lsdc_bo *lbo)
{
        lsdc_bo_kmap(lbo);

        if (lbo->is_iomem)
                memset_io((void __iomem *)lbo->kptr, 0, lbo->size);
        else
                memset(lbo->kptr, 0, lbo->size);

        lsdc_bo_kunmap(lbo);
}

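/* Evict every buffer object currently resident in VRAM */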
int lsdc_bo_evict_vram(struct drm_device *ddev)
{
        struct lsdc_device *ldev = to_lsdc(ddev);
        struct ttm_device *bdev = &ldev->bdev;
        struct ttm_resource_manager *man;

        man = ttm_manager_type(bdev, TTM_PL_VRAM);
        if (unlikely(!man))
                return 0;

        return ttm_resource_manager_evict_all(bdev, man);
}

static void lsdc_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);

        mutex_lock(&ldev->gem.mutex);
        list_del_init(&lbo->list);
        mutex_unlock(&ldev->gem.mutex);

        drm_gem_object_release(&tbo->base);

        kfree(lbo);
}

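/*
 * Create a GEM/TTM buffer object of @size bytes (rounded up to page size)
 * and validate it into the placement described by @domain.  @sg and @resv
 * are passed through for objects backed by an imported dma-buf.
 */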
struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
                               u32 domain,
                               size_t size,
                               bool kernel,
                               struct sg_table *sg,
                               struct dma_resv *resv)
{
        struct lsdc_device *ldev = to_lsdc(ddev);
        struct ttm_device *bdev = &ldev->bdev;
        struct ttm_buffer_object *tbo;
        struct lsdc_bo *lbo;
        enum ttm_bo_type bo_type;
        int ret;

        lbo = kzalloc(sizeof(*lbo), GFP_KERNEL);
        if (!lbo)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&lbo->list);

        lbo->initial_domain = domain & (LSDC_GEM_DOMAIN_VRAM |
                                        LSDC_GEM_DOMAIN_GTT |
                                        LSDC_GEM_DOMAIN_SYSTEM);

        tbo = &lbo->tbo;

        size = ALIGN(size, PAGE_SIZE);

        ret = drm_gem_object_init(ddev, &tbo->base, size);
        if (ret) {
                kfree(lbo);
                return ERR_PTR(ret);
        }

        tbo->bdev = bdev;

        if (kernel)
                bo_type = ttm_bo_type_kernel;
        else if (sg)
                bo_type = ttm_bo_type_sg;
        else
                bo_type = ttm_bo_type_device;

        lsdc_bo_set_placement(lbo, domain);
        lbo->size = size;

        ret = ttm_bo_init_validate(bdev, tbo, bo_type, &lbo->placement, 0,
                                   false, sg, resv, lsdc_bo_destroy);
        if (ret) {
                kfree(lbo);
                return ERR_PTR(ret);
        }

        return lbo;
}

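/* Create a kernel buffer object in @domain and leave it pinned there */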
struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
                                             u32 domain,
                                             size_t size)
{
        struct lsdc_bo *lbo;
        int ret;

        lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
        if (IS_ERR(lbo))
                return ERR_CAST(lbo);

        ret = lsdc_bo_reserve(lbo);
        if (unlikely(ret)) {
                lsdc_bo_unref(lbo);
                return ERR_PTR(ret);
        }

        ret = lsdc_bo_pin(lbo, domain, NULL);
        lsdc_bo_unreserve(lbo);
        if (unlikely(ret)) {
                lsdc_bo_unref(lbo);
                return ERR_PTR(ret);
        }

        return lbo;
}

void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo)
{
        int ret;

        ret = lsdc_bo_reserve(lbo);
        if (unlikely(ret))
                return;

        lsdc_bo_unpin(lbo);
        lsdc_bo_unreserve(lbo);

        lsdc_bo_unref(lbo);
}

static void lsdc_ttm_fini(struct drm_device *ddev, void *data)
{
        struct lsdc_device *ldev = (struct lsdc_device *)data;

        ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
        ttm_range_man_fini(&ldev->bdev, TTM_PL_TT);

        ttm_device_fini(&ldev->bdev);

        drm_dbg(ddev, "ttm finished\n");
}

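/*
 * Initialize the TTM device, a range manager for VRAM and a 512 MiB range
 * manager for GTT, and register lsdc_ttm_fini() as a DRM-managed cleanup
 * action.
 */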
int lsdc_ttm_init(struct lsdc_device *ldev)
{
        struct drm_device *ddev = &ldev->base;
        unsigned long num_vram_pages;
        unsigned long num_gtt_pages;
        int ret;

        ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
                              ddev->anon_inode->i_mapping,
                              ddev->vma_offset_manager, false, true);
        if (ret)
                return ret;

        num_vram_pages = ldev->vram_size >> PAGE_SHIFT;

        ret = ttm_range_man_init(&ldev->bdev, TTM_PL_VRAM, false, num_vram_pages);
        if (unlikely(ret))
                return ret;

        drm_info(ddev, "VRAM: %lu pages ready\n", num_vram_pages);

        /* 512M is far enough for us now */
        ldev->gtt_size = 512 << 20;

        num_gtt_pages = ldev->gtt_size >> PAGE_SHIFT;

        ret = ttm_range_man_init(&ldev->bdev, TTM_PL_TT, true, num_gtt_pages);
        if (unlikely(ret))
                return ret;

        drm_info(ddev, "GTT: %lu pages ready\n", num_gtt_pages);

        return drmm_add_action_or_reset(ddev, lsdc_ttm_fini, ldev);
}

void lsdc_ttm_debugfs_init(struct lsdc_device *ldev)
{
        struct ttm_device *bdev = &ldev->bdev;
        struct drm_device *ddev = &ldev->base;
        struct drm_minor *minor = ddev->primary;
        struct dentry *root = minor->debugfs_root;
        struct ttm_resource_manager *vram_man;
        struct ttm_resource_manager *gtt_man;

        vram_man = ttm_manager_type(bdev, TTM_PL_VRAM);
        gtt_man = ttm_manager_type(bdev, TTM_PL_TT);

        ttm_resource_manager_create_debugfs(vram_man, root, "vram_mm");
        ttm_resource_manager_create_debugfs(gtt_man, root, "gtt_mm");
}