// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter ([email protected])
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>
#include <linux/execmem.h>
#include <linux/vmalloc.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/text-patching.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit __initdata;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

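/*
 * Decide which protection to use when a physical page is mapped into
 * userspace (e.g. via /dev/mem). Platforms may override the decision
 * through ppc_md.phys_mem_access_prot(); by default, anything that is
 * not RAM is mapped non-cached.
 */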
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(__phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

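/*
 * Add a hot-plugged range to the kernel linear mapping. The physical start
 * address is converted to its linear virtual address and mapped under
 * linear_mapping_mutex; failures are reported and turned into -EFAULT.
 */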
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

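/*
 * Tear the linear mapping of a hot-removed range back down, then flush
 * any lingering vmalloc aliases that may still cover it.
 */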
void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

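/*
 * Add a range of pages to the memory map and, on success, push max_pfn,
 * max_low_pfn and high_memory out to cover the newly added range.
 */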
int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		    struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		return ret;

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

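/*
 * Arch hook for memory hotplug: create the linear mapping first, then add
 * the pages; the mapping is rolled back if adding the pages fails.
 */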
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

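/*
 * Without NUMA, all memory is assigned to a single node covering
 * everything memblock knows about.
 */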
#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the
 * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	int zone_dma_bits;

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0));	/* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

	zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

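/*
 * Late boot memory setup: hand all free memblock memory over to the buddy
 * allocator and, on PPC32, log the kernel virtual memory layout.
 */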
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info(" * 0x%08lx..0x%08lx : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

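/*
 * Called once boot is complete: mark init text non-executable, then release
 * the __init sections and the ftrace init trampoline.
 */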
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

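/*
 * On 8xx and book3s/603, preallocate the kernel page tables covering the
 * module area so that executable allocations never have to populate new
 * page-table levels at runtime.
 */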
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
static void prealloc_execmem_pgtable(void)
{
	unsigned long va;

	for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE)
		pte_alloc_kernel(pmd_off_k(va), va);
}
#else
static void prealloc_execmem_pgtable(void) { }
#endif

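/*
 * Describe where executable memory (module text, kprobes, module data) may
 * be allocated: module text goes within 32M of _etext when possible so that
 * calls into the kernel do not need branch trampolines, with the full
 * module area as a fallback.
 */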
struct execmem_info __init *execmem_arch_setup(void)
{
	pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
	unsigned long fallback_start = 0, fallback_end = 0;
	unsigned long start, end;

	/*
	 * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
	 * allow allocating data in the entire vmalloc space
	 */
#ifdef MODULES_VADDR
	unsigned long limit = (unsigned long)_etext - SZ_32M;

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);

	/* First try within 32M limit from _etext to avoid branch trampolines */
	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
		start = limit;
		fallback_start = MODULES_VADDR;
		fallback_end = MODULES_END;
	} else {
		start = MODULES_VADDR;
	}

	end = MODULES_END;
#else
	start = VMALLOC_START;
	end = VMALLOC_END;
#endif

	prealloc_execmem_pgtable();

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start = start,
				.end = end,
				.pgprot = prot,
				.alignment = 1,
				.fallback_start = fallback_start,
				.fallback_end = fallback_end,
			},
			[EXECMEM_KPROBES] = {
				.start = VMALLOC_START,
				.end = VMALLOC_END,
				.pgprot = kprobes_prot,
				.alignment = 1,
			},
			[EXECMEM_MODULE_DATA] = {
				.start = VMALLOC_START,
				.end = VMALLOC_END,
				.pgprot = PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */