1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/processor.h>
3 #include <linux/errno.h>
4 #include <linux/init.h>
5 #include <asm/physmem_info.h>
6 #include <asm/stacktrace.h>
7 #include <asm/boot_data.h>
8 #include <asm/sparsemem.h>
9 #include <asm/sections.h>
10 #include <asm/setup.h>
14 #include "decompressor.h"
/* Memory layout detected at boot, handed to the decompressed kernel via __bootdata. */
17 struct physmem_info __bootdata(physmem_info);
/* State of the boot-time top-down allocator (see physmem_alloc_top_down()). */
18 static unsigned int physmem_alloc_ranges;
19 static unsigned long physmem_alloc_pos;
21 /* up to 256 storage elements, 1020 subincrements each */
/* Worst-case size of the extended online-range array (see __get_physmem_range_ptr). */
22 #define ENTRIES_EXTENDED_MAX \
23 (256 * (1020 / 2) * sizeof(struct physmem_range))
/*
 * Return a pointer to online range slot @n.
 *
 * The first MEM_INLINED_ENTRIES slots are stored inline in physmem_info;
 * higher slots live in an extended array which is allocated lazily on
 * first use, sized for the documented worst case (ENTRIES_EXTENDED_MAX).
 */
25 static struct physmem_range *__get_physmem_range_ptr(u32 n)
27 	if (n < MEM_INLINED_ENTRIES)
28 		return &physmem_info.online[n];
29 	if (unlikely(!physmem_info.online_extended)) {
30 		physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
31 			RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
32 			physmem_alloc_pos, true);
34 	return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
/*
 * Register [start, end) as online memory;
38 * sequential calls to add_physmem_online_range with adjacent memory ranges
39 * are merged together into single memory range.
 */
41 void add_physmem_online_range(u64 start, u64 end)
43 	struct physmem_range *range;
45 	if (physmem_info.range_count) {
		/* Extend the previous range when the new one starts exactly at its end. */
46 		range = __get_physmem_range_ptr(physmem_info.range_count - 1);
47 		if (range->end == start) {
	/* Not mergeable: start a fresh range entry. */
53 	range = __get_physmem_range_ptr(physmem_info.range_count);
56 	physmem_info.range_count++;
/*
 * Issue diagnose 0x260 with subcode 0x10 ("storage configuration") and the
 * parameter register pair rx1/rx2 describing the response buffer.
 *
 * The program-check new PSW is saved, replaced by a PSW that resumes after
 * the diag (address stored via stg below; the instruction loading %[reg1]
 * with that address is not visible in this chunk), and restored afterwards,
 * so an unsupported diag turns into a soft failure instead of a crash.
 *
 * Returns ry (per the diag interface) when the condition code is 0,
 * -1 on a program check or non-zero cc.
 */
59 static int __diag260(unsigned long rx1, unsigned long rx2)
61 	unsigned long reg1, reg2, ry;
62 	union register_pair rx;
68 	ry = 0x10; /* storage configuration */
	/* Save the current program-check new PSW so it can be restored below. */
71 		" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
72 		" epsw %[reg1],%[reg2]\n"
73 		" st %[reg1],0(%[psw_pgm])\n"
74 		" st %[reg2],4(%[psw_pgm])\n"
76 		" stg %[reg1],8(%[psw_pgm])\n"
77 		" diag %[rx],%[ry],0x260\n"
	/* Restore the original program-check new PSW. */
79 		"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
82 		[exc] "+d" (exception),
86 		"+Q" (get_lowcore()->program_new_psw),
90 		[psw_pgm] "a" (&get_lowcore()->program_new_psw)
91 		: CC_CLOBBER_LIST("memory"));
	/* A program check (exception set) is reported as failure. */
92 	cc = exception ? -1 : CC_TRANSFORM(cc);
93 	return cc == 0 ? ry : -1;
/*
 * Detect online memory via diagnose 0x260 and register the returned
 * storage extents as online ranges.  The loop bound min_t(rc, array size)
 * guards against the diag reporting more extents than the buffer holds;
 * "end + 1" converts the extent's inclusive end to the exclusive end
 * expected by add_physmem_online_range().
 */
96 static int diag260(void)
103 	} storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
105 	memset(storage_extents, 0, sizeof(storage_extents));
106 	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
110 	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
111 		add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
/* diag 0x500 subcode to query the configured storage limit. */
115 #define DIAG500_SC_STOR_LIMIT 4
/*
 * Query the storage limit via diagnose 0x500 subcode 4
 * (NOTE(review): diag 0x500 looks like a hypervisor/KVM interface - confirm).
 * Uses the same temporary program-check PSW redirection as __diag260() so
 * an unsupported diag fails softly.  On success the exclusive end address
 * is stored in *max_physmem_end; return statements are elided in this view.
 */
117 static int diag500_storage_limit(unsigned long *max_physmem_end)
119 	unsigned long storage_limit;
120 	unsigned long reg1, reg2;
	/* Save, redirect and later restore the program-check new PSW. */
124 		" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
125 		" epsw %[reg1],%[reg2]\n"
126 		" st %[reg1],0(%[psw_pgm])\n"
127 		" st %[reg2],4(%[psw_pgm])\n"
129 		" stg %[reg1],8(%[psw_pgm])\n"
	/* Subcode goes in register 1 (hence the "1" clobber below). */
130 		" lghi 1,%[subcode]\n"
133 		"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
135 		: [reg1] "=&d" (reg1),
137 		[slimit] "=d" (storage_limit),
138 		"=Q" (get_lowcore()->program_new_psw),
140 		: [psw_old] "a" (&old),
141 		[psw_pgm] "a" (&get_lowcore()->program_new_psw),
142 		[subcode] "i" (DIAG500_SC_STOR_LIMIT)
143 		: "memory", "1", "2");
146 	/* Convert inclusive end to exclusive end */
147 	*max_physmem_end = storage_limit + 1;
/*
 * Probe whether @addr is an accessible storage location by executing
 * TPROT on it.  The program-check new PSW is temporarily redirected to
 * the instruction after the TPROT, so an addressing exception on a
 * non-existent address yields -EFAULT instead of a fatal program check.
 * Otherwise the (transformed) condition code is the result (the return
 * statement is elided in this view).
 */
151 static int tprot(unsigned long addr)
153 	unsigned long reg1, reg2;
	/* Save and redirect the program-check new PSW around the TPROT. */
159 		" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
160 		" epsw %[reg1],%[reg2]\n"
161 		" st %[reg1],0(%[psw_pgm])\n"
162 		" st %[reg2],4(%[psw_pgm])\n"
164 		" stg %[reg1],8(%[psw_pgm])\n"
165 		" tprot 0(%[addr]),0\n"
167 		"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
170 		[exc] "+d" (exception),
173 		"=Q" (get_lowcore()->program_new_psw.addr),
175 		: [psw_old] "a" (&old),
176 		[psw_pgm] "a" (&get_lowcore()->program_new_psw),
178 		: CC_CLOBBER_LIST("memory"));
179 	cc = exception ? -EFAULT : CC_TRANSFORM(cc);
/*
 * Find the end of online memory by probing with tprot() at 1MB
 * granularity, starting from the maximum supported physical address.
 * Appears to be a binary search (the range-halving step is elided in
 * this view).  Returns the exclusive end address in bytes.
 */
183 static unsigned long search_mem_end(void)
185 	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
186 	unsigned long offset = 0;
191 		pivot = offset + range;
		/* tprot() == 0 means the block at pivot is accessible. */
192 		if (!tprot(pivot << 20))
	/* offset is the last accessible 1MB block; +1 makes the end exclusive. */
195 	return (offset + 1) << 20;
/*
 * Determine the maximum possible physical memory end address, trying
 * info sources in decreasing order of preference: diag 0x500 storage
 * limit, then SCLP read info, then a tprot()-based search.  The source
 * actually used is recorded in physmem_info.info_source.
 */
198 unsigned long detect_max_physmem_end(void)
200 	unsigned long max_physmem_end = 0;
202 	if (!diag500_storage_limit(&max_physmem_end)) {
203 		physmem_info.info_source = MEM_DETECT_DIAG500_STOR_LIMIT;
204 	} else if (!sclp_early_get_memsize(&max_physmem_end)) {
205 		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
207 		max_physmem_end = search_mem_end();
208 		physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
210 	return max_physmem_end;
/*
 * Populate the list of online memory ranges, trying in order: SCLP
 * storage info; if the limit came from diag 0x500, the SCLP memsize as
 * a single [0, online_end) range; diag 0x260; and finally, as a last
 * resort, assuming everything up to @max_physmem_end is online.
 */
213 void detect_physmem_online_ranges(unsigned long max_physmem_end)
215 	if (!sclp_early_read_storage_info()) {
216 		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
217 	} else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) {
218 		unsigned long online_end;
220 		if (!sclp_early_get_memsize(&online_end)) {
221 			physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
222 			add_physmem_online_range(0, online_end);
224 	} else if (!diag260()) {
225 		physmem_info.info_source = MEM_DETECT_DIAG260;
226 	} else if (max_physmem_end) {
227 		add_physmem_online_range(0, max_physmem_end);
/*
 * Set the upper limit of memory usable during boot and reset the
 * top-down allocator position to it (allocations grow downwards
 * from this address).
 */
231 void physmem_set_usable_limit(unsigned long limit)
233 	physmem_info.usable = limit;
234 	physmem_alloc_pos = limit;
/*
 * Fatal out-of-memory handler for the boot allocator: print the kernel
 * version, command line, the failed request, all reserved ranges, all
 * usable online ranges, totals and a stack trace, then halt (the halt
 * itself is elided in this view).
 */
237 static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
239 	unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
240 	struct reserved_range *range;
241 	enum reserved_range_type t;
244 	boot_printk("Linux version %s\n", kernel_version);
	/* NOTE(review): command line suppressed for protected-virt guests -
	 * presumably to avoid exposing it; confirm the rationale. */
245 	if (!is_prot_virt_guest() && early_command_line[0])
246 		boot_printk("Kernel command line: %s\n", early_command_line);
247 	boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
248 		    size, align, min, max);
249 	boot_printk("Reserved memory ranges:\n");
250 	for_each_physmem_reserved_range(t, range, &start, &end) {
251 		boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
252 		total_reserved_mem += end - start;
254 	boot_printk("Usable online memory ranges (info source: %s [%x]):\n",
255 		    get_physmem_info_source(), physmem_info.info_source);
256 	for_each_physmem_usable_range(i, &start, &end) {
257 		boot_printk("%016lx %016lx\n", start, end);
258 		total_mem += end - start;
	/* Clamp "Free" at zero to avoid printing a huge unsigned wrap-around. */
260 	boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
261 		    total_mem, total_reserved_mem,
262 		    total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
263 	print_stacktrace(current_frame_address());
264 	boot_printk("\n\n -- System halted\n");
/* Record [addr, addr + size) as the reserved range for @type. */
268 void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
270 	physmem_info.reserved[type].start = addr;
271 	physmem_info.reserved[type].end = addr + size;
/* Drop the reservation for range @type (zeroed start/end == unused). */
274 void physmem_free(enum reserved_range_type type)
276 	physmem_info.reserved[type].start = 0;
277 	physmem_info.reserved[type].end = 0;
/*
 * Test whether [addr, addr + size) overlaps any reserved range or the
 * IPL report certificates area.  On overlap, *intersection_start is set
 * to the start of the conflicting region so the caller can retry below it.
 */
280 static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
281 				       unsigned long *intersection_start)
283 	unsigned long res_addr, res_size;
286 	for (t = 0; t < RR_MAX; t++) {
		/* Skip range types without an active reservation. */
287 		if (!get_physmem_reserved(t, &res_addr, &res_size))
289 		if (intersects(addr, size, res_addr, res_size)) {
290 			*intersection_start = res_addr;
	/* Not in a reserved range; the certificates area is checked last. */
294 	return ipl_report_certs_intersects(addr, size, intersection_start);
/*
 * Core top-down allocator: scan the online ranges from the top downwards
 * for @size bytes, @align aligned, within [min, max], skipping reserved
 * regions.  @from_ranges restricts the scan to the first N ranges
 * (0 means all); the number of ranges still worth scanning is returned
 * via @ranges_left.  On failure either dies via die_oom() or returns
 * (the failure-return path and the die_on_oom parameter line are elided
 * in this view).
 */
297 static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
298 					   unsigned long min, unsigned long max,
299 					   unsigned int from_ranges, unsigned int *ranges_left,
302 	unsigned int nranges = from_ranges ?: physmem_info.range_count;
303 	unsigned long range_start, range_end;
304 	unsigned long intersection_start;
305 	unsigned long addr, pos = max;
	/* Enforce a minimum alignment of 8 bytes. */
307 	align = max(align, 8UL);
309 		__get_physmem_range(nranges - 1, &range_start, &range_end, false);
310 		pos = min(range_end, pos);
		/* Range too small for an aligned allocation below pos? Move on. */
312 		if (round_up(min, align) + size > pos)
314 		addr = round_down(pos - size, align);
315 		if (range_start > addr) {
		/* Conflict with a reservation: retry just below it. */
319 		if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
320 			pos = intersection_start;
325 			*ranges_left = nranges;
329 	die_oom(size, align, min, max);
/*
 * Allocate @size bytes for @type within [min, max] (capped at the current
 * allocator position) and record the result as the reserved range @type.
 * NOTE(review): lines are elided around the physmem_reserve() call -
 * presumably it is guarded by a successful allocation; confirm.
 */
333 unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
334 				  unsigned long align, unsigned long min, unsigned long max,
339 	max = min(max, physmem_alloc_pos);
340 	addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
342 		physmem_reserve(type, addr, size);
/*
 * Allocate @size bytes top-down for @type.  When the new chunk is
 * directly adjacent below the type's existing reserved range, that range
 * is simply extended.  Otherwise a new struct reserved_range is allocated
 * (also top-down) and chained via range->chain, so several non-adjacent
 * chunks of the same type can be tracked.  The allocator position and
 * remaining-range count are updated at the end.
 */
346 unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
349 	struct reserved_range *range = &physmem_info.reserved[type];
350 	struct reserved_range *new_range;
351 	unsigned int ranges_left;
354 	addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
356 	/* if not a consecutive allocation of the same type or first allocation */
357 	if (range->start != addr + size) {
		/* Allocate space for a chained reserved_range bookkeeping entry. */
359 			physmem_alloc_pos = __physmem_alloc_range(
360 				sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
361 				physmem_alloc_ranges, &ranges_left, true);
362 			new_range = (struct reserved_range *)physmem_alloc_pos;
364 			range->chain = new_range;
			/* Redo the allocation below the bookkeeping entry. */
365 			addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
366 						     ranges_left, &ranges_left, true);
368 		range->end = addr + size;
	/* Allocations grow downwards: the new position is the chunk start. */
371 	physmem_alloc_pos = addr;
372 	physmem_alloc_ranges = ranges_left;
/* Current position of the top-down boot allocator (lowest allocated address). */
376 unsigned long get_physmem_alloc_pos(void)
378 	return physmem_alloc_pos;