// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;
	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;
	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");
	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
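
/*
 * Added note: with zero_page_mask set up above, the architecture's
 * ZERO_PAGE() macro can colour read-only zero mappings, roughly (sketch,
 * not quoted verbatim from asm/page.h):
 *
 *	ZERO_PAGE(vaddr) ~= virt_to_page((void *)(empty_zero_page +
 *				((unsigned long)(vaddr) & zero_page_mask)))
 *
 * so neighbouring user addresses are spread over 1 << order zero pages
 * instead of all hitting the same cache lines.
 */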

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}
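
/*
 * Added note: on s390, control register 1 holds the primary ASCE, control
 * register 7 the secondary ASCE and control register 13 the home ASCE.
 * paging_init() points CR1 and CR13 at the kernel page table, leaves CR7
 * on the invalid ASCE, and then switches the PSW to DAT-on, home-space
 * mode, so all further kernel accesses go through the new page tables.
 */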

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}
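
/*
 * Added note: for protected virtualization, "decrypted" means shared with
 * the hypervisor via the Ultravisor and "encrypted" means unshared again.
 * Both helpers work page by page, one Ultravisor call per page, which is
 * why callers pass a page count rather than a byte length.
 */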

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}
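
/*
 * Added note: SWIOTLB_FORCE makes every streaming DMA mapping of a
 * protected virtualization guest go through the swiotlb bounce buffer,
 * and swiotlb_update_mem_attributes() shares that buffer with the
 * hypervisor (it ends up in set_memory_decrypted() above).
 */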

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));
	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();
}

void free_initmem(void)
{
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than
	 * or equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
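
/*
 * Added example with illustrative numbers: with a storage increment
 * (sclp.rzm) of 512MB and a MIN_MEMORY_BLOCK_SIZE of 256MB this returns
 * 512MB, i.e. the larger of the two values always wins.
 */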

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
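
/*
 * Added note (sketch, not the literal <linux/percpu.h> expansion): once
 * the offsets are set up, per_cpu_ptr(ptr, cpu) essentially resolves to
 *
 *	(typeof(ptr))((unsigned long)(ptr) + __per_cpu_offset[cpu])
 *
 * i.e. each CPU's copy lives at a fixed offset from the static per cpu
 * section.
 */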

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);
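
/*
 * Added note: s390_cma_check_range() returns -EBUSY for any overlap with a
 * CMA area, which notifier_from_errno() turns into a failed notifier call
 * on MEM_GOING_OFFLINE, so an offline request for a memory block that
 * intersects a CMA area is refused.
 */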

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;
	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
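
/*
 * Added note: the identity mapping for the new range is created first via
 * vmem_add_mapping(); if __add_pages() fails afterwards, the mapping is
 * torn down again so a failed hotplug attempt does not leave a stale
 * kernel mapping behind.
 */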

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */