// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO           BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                                IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

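/*
 * Allocate one zeroed page for use as a page table, switch it to
 * write-combined, DMA-map it and return a write-combined kernel mapping
 * of it. The DMA address is returned through @dma; NULL on failure.
 */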
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
        dma_addr_t dma_addr;
        struct page *page;
        void *cpu;

        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page)
                return NULL;

        set_pages_array_wc(&page, 1);

        dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(vdev->drm.dev, dma_addr))
                goto err_free_page;

        cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (!cpu)
                goto err_dma_unmap_page;

        *dma = dma_addr;
        return cpu;

err_dma_unmap_page:
        dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
        put_page(page);
        return NULL;
}

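/*
 * Undo ivpu_pgtable_alloc_page(): unmap the kernel mapping, DMA-unmap the
 * page, restore the write-back attribute and release the page. The entry
 * flag bits are masked off @dma_addr before unmapping because callers may
 * pass a value read back from a page table entry.
 */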
static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        struct page *page;

        if (cpu_addr) {
                page = vmalloc_to_page(cpu_addr);
                vunmap(cpu_addr);
                dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                set_pages_array_wb(&page, 1);
                put_page(page);
        }
}

static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        dma_addr_t pgd_dma;

        pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
        if (!pgtable->pgd_dma_ptr)
                return -ENOMEM;

        pgtable->pgd_dma = pgd_dma;

        return 0;
}

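/*
 * Walk the whole four-level page table top-down and free every PUD, PMD
 * and PTE page that was allocated, together with the CPU-side pointer
 * arrays used to track them, and finally the PGD page itself.
 */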
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

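/*
 * The ivpu_mmu_ensure_*() helpers below lazily allocate the next page
 * table level for a given index: if the level already exists its CPU
 * pointer is returned, otherwise a new table page is allocated, the
 * tracking arrays for the levels underneath are allocated as needed,
 * and the parent entry is updated with the new table's DMA address.
 */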
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}

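/*
 * Map a 64K block as 16 contiguous 4K PTEs with the contiguous hint flag
 * set, so the MMU can cover the whole block with a single TLB entry.
 * Both @vpu_addr and @dma_addr must be 64K aligned.
 */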
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

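/*
 * Map @size bytes starting at @dma_addr, using 64K contiguous mappings
 * whenever the remaining size and both addresses are 64K aligned, and
 * falling back to single 4K pages otherwise.
 */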
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                         u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}

static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                        u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}

static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                            u64 vpu_addr)
{
        u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
        u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
        u64 offset = 0;

        ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

        while (start + offset < end) {
                ivpu_mmu_context_split_page(vdev, ctx, start + offset);
                offset += IVPU_MMU_PAGE_SIZE;
        }
}

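/*
 * Mark an already mapped, page-aligned range as read-only. Any 64K
 * contiguous mapping that straddles the start or end of the range is
 * split back into 4K pages first so that only the requested pages lose
 * write access, then the updates are flushed and the context's TLB is
 * invalidated.
 */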
int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              size_t size)
{
        u64 end = vpu_addr + size;
        size_t size_left = size;
        int ret;

        if (size == 0)
                return 0;

        if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
                return -EINVAL;

        mutex_lock(&ctx->lock);

        ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
                 ctx->id, vpu_addr, size);

        if (!ivpu_disable_mmu_cont_pages) {
                /* Split 64K contiguous page at the beginning if needed */
                if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
                        ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);

                /* Split 64K contiguous page at the end if needed */
                if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
                        ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
        }

        while (size_left) {
                if (vpu_addr < end)
                        ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);

                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size_left -= IVPU_MMU_PAGE_SIZE;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

        return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}

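/*
 * Map a DMA-mapped scatter-gather table at @vpu_addr in the context's
 * address space. Each segment is mapped from the start of its page, the
 * updates are flushed from the write-combine buffers, and the context's
 * TLB is invalidated before returning.
 */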
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
        struct scatterlist *sg;
        int ret;
        u64 prot;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return -EINVAL;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;

        if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        mutex_unlock(&ctx->lock);
                        return ret;
                }
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
        return ret;
}

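/*
 * Reverse of ivpu_mmu_context_map_sgt(): replace every PTE covering the
 * scatter-gather table with the invalid dummy entry, flush the updates
 * and invalidate the context's TLB.
 */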
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

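/*
 * Reserve a VPU address range for @node within @range. Allocations large
 * enough for 64K contiguous mappings are first attempted with 64K
 * alignment so they can later be mapped with the contiguous hint;
 * otherwise plain 4K alignment is used.
 */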
int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
                             u64 size, struct drm_mm_node *node)
{
        int ret;

        WARN_ON(!range);

        mutex_lock(&ctx->lock);
        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                  range->start, range->end, DRM_MM_INSERT_BEST);
                if (!ret)
                        goto unlock;
        }

        ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                          range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
        mutex_unlock(&ctx->lock);
        return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        mutex_lock(&ctx->lock);
        drm_mm_remove_node(node);
        mutex_unlock(&ctx->lock);
}

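/*
 * Initialize an MMU context: allocate its PGD and set up the drm_mm
 * address space allocator. The global context (id 0) covers the
 * global..shave ranges, all other contexts cover the user..dma ranges.
 */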
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;
        int ret;

        mutex_init(&ctx->lock);

        ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
                return ret;
        }

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = vdev->hw->ranges.user.start;
                end = vdev->hw->ranges.dma.end;
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;

        return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
                return;

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);

        ctx->pgtable.pgd_dma_ptr = NULL;
        ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
}

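/*
 * Look up the file_priv bound to @ssid in the context xarray and flag it
 * as having triggered an MMU fault.
 */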
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
        struct ivpu_file_priv *file_priv;

        xa_lock(&vdev->context_xa);

        file_priv = xa_load(&vdev->context_xa, ssid);
        if (file_priv)
                file_priv->has_mmu_faults = true;

        xa_unlock(&vdev->context_xa);
}

int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
        int ret;

        drm_WARN_ON(&vdev->drm, !ctx_id);

        ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
                return ret;
        }

        ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
                goto err_context_fini;
        }

        return 0;

err_context_fini:
        ivpu_mmu_context_fini(vdev, ctx);
        return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        drm_WARN_ON(&vdev->drm, !ctx->id);

        ivpu_mmu_clear_pgtable(vdev, ctx->id);
        ivpu_mmu_context_fini(vdev, ctx);
}