// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU.  It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, thus it could need up to 4MB of
 * physically contiguous memory to store the PTEs (one 4-byte PTE per
 * 4KB page, and 4GB / 4KB = 1M pages).
 *
 * Because the 4MB of contiguous memory for page tables is precious,
 * and switching between page tables is expensive, we load all BOs
 * into the same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128KB granularity) which pages are available
 * to each client.  This is not yet implemented.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

/* Note: all PTEs covering a 1MB superpage must be written with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_BIGPAGE BIT(30)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)
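
/* The low bits of a valid PTE hold the page frame number: the bus
 * address of the backing 4KB page, shifted right by
 * V3D_MMU_PAGE_SHIFT.  v3d_mmu_insert_ptes() below checks that the
 * PFN fits in 24 bits.
 */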
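
/* Return true if both the GPU virtual page index and the PTE value
 * (whose low bits hold the PFN) are aligned to the given page size,
 * expressed in units of 4KB pages.
 */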
static bool v3d_mmu_is_aligned(u32 page, u32 page_address, size_t alignment)
{
        return IS_ALIGNED(page, alignment >> V3D_MMU_PAGE_SHIFT) &&
               IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT);
}
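
/* Flush the MMUC, then clear the MMU's TLB, waiting for each
 * operation to finish.  Returns the error from wait_for() if either
 * wait times out.
 */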
int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
        int ret;

        V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH |
                  V3D_MMUC_CONTROL_ENABLE);

        ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
                         V3D_MMUC_CONTROL_FLUSHING), 100);
        if (ret) {
                dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
                return ret;
        }

        V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
                  V3D_MMU_CTL_TLB_CLEAR);

        ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
                         V3D_MMU_CTL_TLB_CLEARING), 100);
        if (ret)
                dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n");

        return ret;
}
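
/* Program the page table's physical address and enable the MMU, with
 * aborts and interrupts raised on invalid-PTE, write-violation, and
 * cap-exceeded faults.  Accesses that fault are pointed at the
 * scratch page via V3D_MMU_ILLEGAL_ADDR.  Ends with a full flush so
 * the new table takes effect.
 */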
int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
        V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
        V3D_WRITE(V3D_MMU_CTL,
                  V3D_MMU_CTL_ENABLE |
                  V3D_MMU_CTL_PT_INVALID_ENABLE |
                  V3D_MMU_CTL_PT_INVALID_ABORT |
                  V3D_MMU_CTL_PT_INVALID_INT |
                  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
                  V3D_MMU_CTL_WRITE_VIOLATION_INT |
                  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
                  V3D_MMU_CTL_CAP_EXCEEDED_INT);
        V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
                  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
                  V3D_MMU_ILLEGAL_ADDR_ENABLE);
        V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

        return v3d_mmu_flush_all(v3d);
}
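
/* Write PTEs for every page backing @bo at the BO's offset in the
 * GPU's 4GB address space, using 1MB superpages or 64KB big pages
 * where size and alignment allow, then flush the MMU so the new
 * entries are visible.
 */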
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
        struct drm_gem_shmem_object *shmem_obj = &bo->base;
        struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
        u32 page = bo->node.start;
        struct scatterlist *sgl;
        unsigned int count;

        for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, count) {
                dma_addr_t dma_addr = sg_dma_address(sgl);
                u32 pfn = dma_addr >> V3D_MMU_PAGE_SHIFT;
                unsigned int len = sg_dma_len(sgl);

                while (len > 0) {
                        u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
                        u32 page_address = page_prot | pfn;
                        unsigned int i, page_size;
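
                        /* The PTE's PFN field is 24 bits wide; catch
                         * any bus address that would overflow it
                         * (i.e. anything at or above 64GB with 4KB
                         * pages).
                         */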
                        BUG_ON(pfn + V3D_PAGE_FACTOR >= BIT(24));
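
                        /* Pick the largest page size whose alignment
                         * requirements both the GPU virtual page index
                         * and the PFN satisfy, and for which enough
                         * contiguous DMA length remains.  Super and
                         * big pages are still represented as runs of
                         * 4KB PTEs, each carrying the matching flag.
                         */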
                        if (len >= SZ_1M &&
                            v3d_mmu_is_aligned(page, page_address, SZ_1M)) {
                                page_size = SZ_1M;
                                page_address |= V3D_PTE_SUPERPAGE;
                        } else if (len >= SZ_64K &&
                                   v3d_mmu_is_aligned(page, page_address, SZ_64K)) {
                                page_size = SZ_64K;
                                page_address |= V3D_PTE_BIGPAGE;
                        } else {
                                page_size = SZ_4K;
                        }
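
                        /* Write one PTE per 4KB page.  page_address
                         * holds the prot bits plus the starting PFN,
                         * so adding i yields successive PFNs with the
                         * same flags.
                         */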
                        for (i = 0; i < page_size >> V3D_MMU_PAGE_SHIFT; i++) {
                                v3d->pt[page++] = page_address + i;
                                pfn++;
                        }

                        len -= page_size;
                }
        }

        WARN_ON_ONCE(page - bo->node.start !=
                     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);

        if (v3d_mmu_flush_all(v3d))
                dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
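
/* Zero out the PTEs covering @bo's range of the GPU address space and
 * flush, so the backing pages can no longer be reached via the MMU.
 */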
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
        struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
        u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
        u32 page;

        for (page = bo->node.start; page < bo->node.start + npages; page++)
                v3d->pt[page] = 0;

        if (v3d_mmu_flush_all(v3d))
                dev_err(v3d->drm.dev, "MMU flush timeout\n");
}