// SPDX-License-Identifier: GPL-2.0
#include <linux/processor.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
#include <asm/sparsemem.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/uv.h>
#include "decompressor.h"
#include "boot.h"

struct physmem_info __bootdata(physmem_info);
static unsigned int physmem_alloc_ranges;
static unsigned long physmem_alloc_pos;

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct physmem_range))

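/*
 * Return a pointer to the n'th online memory range descriptor. The first
 * MEM_INLINED_ENTRIES entries live inline in physmem_info; anything beyond
 * that goes into a lazily allocated extended array sized for the maximum
 * possible number of storage increments.
 */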
static struct physmem_range *__get_physmem_range_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &physmem_info.online[n];
	if (unlikely(!physmem_info.online_extended)) {
		physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
			RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
			physmem_alloc_pos, true);
	}
	return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_physmem_online_range() with adjacent memory
 * ranges are merged into a single memory range.
 */
void add_physmem_online_range(u64 start, u64 end)
{
	struct physmem_range *range;

	if (physmem_info.range_count) {
		range = __get_physmem_range_ptr(physmem_info.range_count - 1);
		if (range->end == start) {
			range->end = end;
			return;
		}
	}

	range = __get_physmem_range_ptr(physmem_info.range_count);
	range->start = start;
	range->end = end;
	physmem_info.range_count++;
}

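/*
 * diag 0x260 with subcode 0x10 queries the hypervisor provided storage
 * configuration. The program-new PSW is temporarily pointed at the local
 * label 1 so that a program check (e.g. on a machine without diag 0x260
 * support) resumes after the diag instead of killing the boot.
 */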
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	psw_t old;
	int rc;

	rx.even = rx1;
	rx.odd	= rx2;
	ry = 0x10; /* storage configuration */
	rc = -1;   /* fail */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [ry] "+&d" (ry),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? ry : -1;
}

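/*
 * On success __diag260() returns the number of used storage extents in
 * ry. Each extent's reported end address is inclusive, hence the "+ 1"
 * when converting to an online range.
 */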
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

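/*
 * TPROT is used as a non-destructive probe: an address backed by memory
 * sets a condition code, while an address beyond the end of memory raises
 * a program check, which the redirected PSW turns into the initial
 * -EFAULT return value.
 */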
static int tprot(unsigned long addr)
{
	unsigned long reg1, reg2;
	int rc = -EFAULT;
	psw_t old;

	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  "=Q" (S390_lowcore.program_new_psw.addr),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [addr] "a" (addr)
		: "cc", "memory");
	return rc;
}

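/*
 * Binary search for the highest accessible 1MB block; needs at most
 * MAX_PHYSMEM_BITS - 20 tprot() probes.
 */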
static unsigned long search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}
	return (offset + 1) << 20;
}

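/* Prefer the SCLP reported memory size, fall back to binary search. */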
unsigned long detect_max_physmem_end(void)
{
	unsigned long max_physmem_end = 0;

	if (!sclp_early_get_memsize(&max_physmem_end)) {
		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
	} else {
		max_physmem_end = search_mem_end();
		physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
	}
	return max_physmem_end;
}

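/*
 * Populate the online ranges from the best available source: SCLP
 * storage info, then diag 0x260, then a single 0..max_physmem_end range
 * as a last resort.
 */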
void detect_physmem_online_ranges(unsigned long max_physmem_end)
{
	if (!sclp_early_read_storage_info()) {
		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
	} else if (!diag260()) {
		physmem_info.info_source = MEM_DETECT_DIAG260;
	} else if (max_physmem_end) {
		add_physmem_online_range(0, max_physmem_end);
	}
}

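/*
 * The usable limit also seeds the top-down allocator position: all
 * boot-time allocations are handed out below it.
 */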
void physmem_set_usable_limit(unsigned long limit)
{
	physmem_info.usable = limit;
	physmem_alloc_pos = limit;
}

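/* Print the memory layout for debugging purposes and halt the system. */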
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
{
	unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
	struct reserved_range *range;
	enum reserved_range_type t;
	int i;

	decompressor_printk("Linux version %s\n", kernel_version);
	if (!is_prot_virt_guest() && early_command_line[0])
		decompressor_printk("Kernel command line: %s\n", early_command_line);
	decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
			    size, align, min, max);
	decompressor_printk("Reserved memory ranges:\n");
	for_each_physmem_reserved_range(t, range, &start, &end) {
		decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
		total_reserved_mem += end - start;
	}
	decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
			    get_physmem_info_source(), physmem_info.info_source);
	for_each_physmem_usable_range(i, &start, &end) {
		decompressor_printk("%016lx %016lx\n", start, end);
		total_mem += end - start;
	}
	decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
			    total_mem, total_reserved_mem,
			    total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
	print_stacktrace(current_frame_address());
	sclp_early_printk("\n\n -- System halted\n");
	disabled_wait();
}

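/*
 * One reserved range is tracked per type; additional ranges of the same
 * type are chained by physmem_alloc_top_down() via ->chain.
 */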
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
	physmem_info.reserved[type].start = addr;
	physmem_info.reserved[type].end = addr + size;
}

void physmem_free(enum reserved_range_type type)
{
	physmem_info.reserved[type].start = 0;
	physmem_info.reserved[type].end = 0;
}

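/*
 * Test a candidate allocation against every reserved range and the ipl
 * report certificates. On overlap, report where the blocking range
 * starts so the caller can restart its search below that address.
 */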
static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
				       unsigned long *intersection_start)
{
	unsigned long res_addr, res_size;
	int t;

	for (t = 0; t < RR_MAX; t++) {
		if (!get_physmem_reserved(t, &res_addr, &res_size))
			continue;
		if (intersects(addr, size, res_addr, res_size)) {
			*intersection_start = res_addr;
			return true;
		}
	}
	return ipl_report_certs_intersects(addr, size, intersection_start);
}

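/*
 * Scan the online ranges top down for a free block of "size" bytes with
 * the requested alignment inside [min, max]. "from_ranges" restricts the
 * scan for the sequential allocator; 0 means consider all ranges.
 */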
static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
					   unsigned long min, unsigned long max,
					   unsigned int from_ranges, unsigned int *ranges_left,
					   bool die_on_oom)
{
	unsigned int nranges = from_ranges ?: physmem_info.range_count;
	unsigned long range_start, range_end;
	unsigned long intersection_start;
	unsigned long addr, pos = max;

	align = max(align, 8UL);
	while (nranges) {
		__get_physmem_range(nranges - 1, &range_start, &range_end, false);
		pos = min(range_end, pos);

		if (round_up(min, align) + size > pos)
			break;
		addr = round_down(pos - size, align);
		if (range_start > addr) {
			nranges--;
			continue;
		}
		if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
			pos = intersection_start;
			continue;
		}

		if (ranges_left)
			*ranges_left = nranges;
		return addr;
	}
	if (die_on_oom)
		die_oom(size, align, min, max);
	return 0;
}

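/*
 * Allocate and immediately reserve a block of the given type somewhere
 * inside [min, max]. Illustrative call (not taken from this file):
 * physmem_alloc_range(RR_INITRD, len, sizeof(long), 0, get_physmem_alloc_pos(), true)
 * would place a block of "len" bytes below the current allocator position.
 */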
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
				  unsigned long align, unsigned long min, unsigned long max,
				  bool die_on_oom)
{
	unsigned long addr;

	max = min(max, physmem_alloc_pos);
	addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
	if (addr)
		physmem_reserve(type, addr, size);
	return addr;
}

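/*
 * Sequential allocator: each call lowers physmem_alloc_pos. Consecutive
 * allocations of the same type simply grow the existing reserved range;
 * otherwise the old range is saved into a freshly allocated struct
 * reserved_range and linked through ->chain.
 */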
unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
				     unsigned long align)
{
	struct reserved_range *range = &physmem_info.reserved[type];
	struct reserved_range *new_range;
	unsigned int ranges_left;
	unsigned long addr;

	addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
				     &ranges_left, true);
	/* if not a consecutive allocation of the same type or first allocation */
	if (range->start != addr + size) {
		if (range->end) {
			physmem_alloc_pos = __physmem_alloc_range(
				sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
				physmem_alloc_ranges, &ranges_left, true);
			new_range = (struct reserved_range *)physmem_alloc_pos;
			*new_range = *range;
			range->chain = new_range;
			addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
						     ranges_left, &ranges_left, true);
		}
		range->end = addr + size;
	}
	range->start = addr;
	physmem_alloc_pos = addr;
	physmem_alloc_ranges = ranges_left;
	return addr;
}

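/*
 * Expose the current allocator position, e.g. as an upper bound for a
 * later physmem_alloc_range() call.
 */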
unsigned long get_physmem_alloc_pos(void)
{
	return physmem_alloc_pos;
}