// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
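/*
 * For reference, the "base offset calculation" above is what the
 * generic memory model compiles the pfn/page conversions down to when
 * CONFIG_SPARSEMEM_VMEMMAP is enabled (include/asm-generic/memory_model.h):
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * No page table walk or memory access happens at conversion time; the
 * cost is paid once, below, when the backing pages are instantiated.
 */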
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid_raw(size, align, goal,
					       BOOTMEM_ALLOC_ACCESSIBLE, node);
}

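/*
 * Early-boot scratch buffer: sparse_mem_maps_populate_node() carves a
 * per-node region out of bootmem and vmemmap_alloc_block_buf() hands it
 * out piecemeal, avoiding one bootmem allocation per block.
 */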
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; fall back to bootmem otherwise. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
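/*
 * Note on the gfp mask above: __GFP_RETRY_MAYFAIL makes the allocator
 * try hard for these (possibly high-order) blocks but fail rather than
 * invoke the OOM killer, and __GFP_NOWARN suppresses the generic
 * failure splat in favour of the single warn_alloc() in this function.
 */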

/* Early-boot callers must all request the same size; see the note below. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* carve the allocation out of the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}
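/*
 * Why a uniform size matters above, with illustrative numbers: the
 * cursor is rounded up to the request size, so repeated PAGE_SIZE
 * requests pack the buffer back to back. If a 4K request were followed
 * by a 2M one, the ALIGN() would silently skip up to 2M - 4K of
 * already-reserved buffer space.
 */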

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
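/*
 * Layout implied by the two helpers above (a sketch inferred from their
 * arithmetic, not the struct definition):
 *
 *	base_pfn
 *	|-- reserve --|----------------- free -----------------|
 *	              |-- alloc --|-- align --|-- remaining --|
 *
 * "free" is the total pfn budget set aside for memmap storage; alloc
 * and align padding are charged against it, and the next allocation
 * starts at base_pfn + reserve + alloc + align.
 */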

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}
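/*
 * Worked example with illustrative numbers: for nr_pfns = 0x50 (80
 * pages), find_first_bit() returns 4, so the block is aligned to
 * 1UL << 4 = 16 pfns, i.e. to the lowest set bit of the request (which
 * equals the full size for the usual power-of-two requests). With the
 * cursor at pfn 0x1009 this gives nr_align = ALIGN(0x1009, 16) - 0x1009
 * = 7 pad pfns, and the caller is handed pfn 0x1010.
 */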

void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
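/*
 * A minimal sketch of the architecture hook named in the header
 * comment: an arch that maps the vmemmap exclusively with base pages
 * can implement vmemmap_populate() as a straight call into the walker
 * above, roughly (altmap handling, if any, left to the arch):
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *				       unsigned long end, int node,
 *				       struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */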

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free the unused remainder of the buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
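/*
 * Sizing arithmetic in the function above, with illustrative x86_64
 * numbers: sizeof(struct page) == 64 and PAGES_PER_SECTION == 32768
 * (128M sections), so size == 64 * 32768 == 2M, which ALIGN() leaves
 * as-is. Each section's memmap then occupies exactly one PMD_SIZE slot
 * of the per-node buffer, which is what lets huge-page mappings of the
 * vmemmap line up with it.
 */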