// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for interfacing to Open Firmware.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 */
/* we cannot use FORTIFY as it brings in new symbols */
#define __NO_FORTIFY
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor-api.h>

#include <linux/linux_logo.h>
/* All of prom_init bss lives here */
#define __prombss __section(".bss.prominit")
/*
 * Eventually bump that one up
 */
#define DEVTREE_CHUNK_SIZE	0x100000
/*
 * This is the size of the local memory reserve map that gets copied
 * into the boot params passed to the kernel. That size is totally
 * flexible as the kernel just reads the list until it encounters an
 * entry with size 0, so it can be changed without breaking binary
 * compatibility
 */
#define MEM_RESERVE_MAP_SIZE	8
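
/*
 * Note: this means mem_reserve_map[] below holds at most
 * MEM_RESERVE_MAP_SIZE - 1 real entries; reserve_mem() always leaves the
 * last slot zeroed so the copied list is terminated by an entry of size 0.
 */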
/*
 * prom_init() is called very early on, before the kernel text
 * and data have been mapped to KERNELBASE.  At this point the code
 * is running at whatever address it has been loaded at.
 * On ppc32 we compile with -mrelocatable, which means that references
 * to extern and static variables get relocated automatically.
 * ppc64 objects are always relocatable, we just need to relocate the
 * TOC.
 *
 * Because OF may have mapped I/O devices into the area starting at
 * KERNELBASE, particularly on CHRP machines, we can't safely call
 * OF once the kernel has been mapped to KERNELBASE.  Therefore all
 * OF calls must be done within prom_init().
 *
 * ADDR is used in calls to call_prom.  The 4th and following
 * arguments to call_prom should be 32-bit values.
 * On ppc64, 64 bit values are truncated to 32 bits (and
 * fortunately don't get interpreted as two arguments).
 */
#define ADDR(x)		(u32)(unsigned long)(x)
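
/*
 * Illustrative use of ADDR() (mirroring calls made later in this file):
 * buffer pointers handed to OF as client-interface arguments are narrowed
 * to 32-bit cells, e.g.
 *
 *	call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
 */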
#ifdef CONFIG_PPC64
#define OF_WORKAROUNDS	0
#else
#define OF_WORKAROUNDS	of_workarounds
static int of_workarounds __prombss;
#endif

#define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
#define PROM_BUG() do {						\
	prom_printf("kernel BUG at %s line 0x%x!\n",		\
		    __FILE__, __LINE__);			\
} while (0)

#ifdef DEBUG_PROM
#define prom_debug(x...)	prom_printf(x)
#else
#define prom_debug(x...)	do { } while (0)
#endif
typedef u32 prom_arg_t;

struct mem_map_entry {
	__be64	base;
	__be64	size;
};

typedef __be32 cell_t;
extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
		    unsigned long r6, unsigned long r7, unsigned long r8,
		    unsigned long r9);

#ifdef CONFIG_PPC64
extern int enter_prom(struct prom_args *args, unsigned long entry);
#else
static inline int enter_prom(struct prom_args *args, unsigned long entry)
{
	return ((int (*)(struct prom_args *))entry)(args);
}
#endif

extern void copy_and_flush(unsigned long dest, unsigned long src,
			   unsigned long size, unsigned long offset);
static struct prom_t __prombss prom;

static unsigned long __prombss prom_entry;

static char __prombss of_stdout_device[256];
static char __prombss prom_scratch[256];

static unsigned long __prombss dt_header_start;
static unsigned long __prombss dt_struct_start, dt_struct_end;
static unsigned long __prombss dt_string_start, dt_string_end;

static unsigned long __prombss prom_initrd_start, prom_initrd_end;

static int __prombss prom_iommu_force_on;
static int __prombss prom_iommu_off;
static unsigned long __prombss prom_tce_alloc_start;
static unsigned long __prombss prom_tce_alloc_end;
#ifdef CONFIG_PPC_PSERIES
static bool __prombss prom_radix_disable;
static bool __prombss prom_radix_gtse_disable;
static bool __prombss prom_xive_disable;
#endif

#ifdef CONFIG_PPC_SVM
static bool __prombss prom_svm_enable;
#endif
struct platform_support {
	bool hash_mmu;
	bool radix_mmu;
	bool radix_gtse;
	bool xive;
};
/* Platform codes are now obsolete in the kernel. They are only used within
 * this file and will ultimately go away too. Feel free to change them if you
 * need to; they are not shared with anything outside of this file anymore.
 */
#define PLATFORM_PSERIES	0x0100
#define PLATFORM_PSERIES_LPAR	0x0101
#define PLATFORM_LPAR		0x0001
#define PLATFORM_POWERMAC	0x0400
#define PLATFORM_GENERIC	0x0500
static int __prombss of_platform;

static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];

static unsigned long __prombss prom_memory_limit;

static unsigned long __prombss alloc_top;
static unsigned long __prombss alloc_top_high;
static unsigned long __prombss alloc_bottom;
static unsigned long __prombss rmo_top;
static unsigned long __prombss ram_top;

static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __prombss mem_reserve_cnt;

static cell_t __prombss regbuf[1024];

static bool __prombss rtas_has_query_cpu_stopped;
/*
 * Error results ... some OF calls will return "-1" on error, some
 * will return 0, some will return either. To simplify, here are
 * macros to use with any ihandle or phandle return value to check if
 * it is valid.
 */
#define PROM_ERROR		(-1u)
#define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
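
/*
 * Typical validity check, as used further down in this file:
 *
 *	phandle rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
 *	if (!PHANDLE_VALID(rtas))
 *		return PLATFORM_GENERIC;
 */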
/* Copied from lib/string.c and lib/kstrtox.c */
230 static int __init prom_strcmp(const char *cs, const char *ct)
232 unsigned char c1, c2;
238 return c1 < c2 ? -1 : 1;
245 static char __init *prom_strcpy(char *dest, const char *src)
249 while ((*dest++ = *src++) != '\0')
254 static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
256 unsigned char c1, c2;
262 return c1 < c2 ? -1 : 1;
270 static size_t __init prom_strlen(const char *s)
274 for (sc = s; *sc != '\0'; ++sc)
279 static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
281 const unsigned char *su1, *su2;
284 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
285 if ((res = *su1 - *su2) != 0)
290 static char __init *prom_strstr(const char *s1, const char *s2)
294 l2 = prom_strlen(s2);
297 l1 = prom_strlen(s1);
300 if (!prom_memcmp(s1, s2, l2))
307 static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
309 size_t dsize = prom_strlen(dest);
310 size_t len = prom_strlen(src);
311 size_t res = dsize + len;
313 /* This would be a bug */
321 memcpy(dest, src, len);
327 #ifdef CONFIG_PPC_PSERIES
328 static int __init prom_strtobool(const char *s, bool *res)
/* This is the one and *ONLY* place where we actually call open
 * firmware.
 */

static int __init call_prom(const char *service, int nargs, int nret, ...)
{
	int i;
	struct prom_args args;
	va_list list;

	args.service = cpu_to_be32(ADDR(service));
	args.nargs = cpu_to_be32(nargs);
	args.nret = cpu_to_be32(nret);

	va_start(list, nret);
	for (i = 0; i < nargs; i++)
		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
	va_end(list);

	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;

	if (enter_prom(&args, prom_entry) < 0)
		return PROM_ERROR;

	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
}
static int __init call_prom_ret(const char *service, int nargs, int nret,
				prom_arg_t *rets, ...)
{
	int i;
	struct prom_args args;
	va_list list;

	args.service = cpu_to_be32(ADDR(service));
	args.nargs = cpu_to_be32(nargs);
	args.nret = cpu_to_be32(nret);

	va_start(list, rets);
	for (i = 0; i < nargs; i++)
		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
	va_end(list);

	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;

	if (enter_prom(&args, prom_entry) < 0)
		return PROM_ERROR;

	if (rets != NULL)
		for (i = 1; i < nret; ++i)
			rets[i-1] = be32_to_cpu(args.args[nargs+i]);

	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
}
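
/*
 * Example use of call_prom_ret() (mirroring the RTAS instantiation later
 * in this file): the function result is the first return cell, additional
 * return cells are copied into *rets:
 *
 *	u32 entry = 0;
 *	if (call_prom_ret("call-method", 3, 2, &entry,
 *			  ADDR("instantiate-rtas"), rtas_inst, base) != 0
 *	    || entry == 0)
 *		prom_printf(" failed\n");
 */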
425 static void __init prom_print(const char *msg)
429 if (prom.stdout == 0)
432 for (p = msg; *p != 0; p = q) {
433 for (q = p; *q != 0 && *q != '\n'; ++q)
436 call_prom("write", 3, 1, prom.stdout, p, q - p);
440 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
/*
 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
 * we do not need __udivdi3 or __umoddi3 on 32bits.
 */
449 static void __init prom_print_hex(unsigned long val)
451 int i, nibbles = sizeof(val)*2;
452 char buf[sizeof(val)*2+1];
454 for (i = nibbles-1; i >= 0; i--) {
455 buf[i] = (val & 0xf) + '0';
457 buf[i] += ('a'-'0'-10);
461 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
464 /* max number of decimal digits in an unsigned long */
466 static void __init prom_print_dec(unsigned long val)
469 char buf[UL_DIGITS+1];
471 for (i = UL_DIGITS-1; i >= 0; i--) {
472 buf[i] = (val % 10) + '0';
477 /* shift stuff down */
478 size = UL_DIGITS - i;
479 call_prom("write", 3, 1, prom.stdout, buf+i, size);
483 static void __init prom_printf(const char *format, ...)
485 const char *p, *q, *s;
491 va_start(args, format);
492 for (p = format; *p != 0; p = q) {
493 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
496 call_prom("write", 3, 1, prom.stdout, p, q - p);
501 call_prom("write", 3, 1, prom.stdout,
515 s = va_arg(args, const char *);
522 v = va_arg(args, unsigned int);
525 v = va_arg(args, unsigned long);
529 v = va_arg(args, unsigned long long);
538 v = va_arg(args, unsigned int);
541 v = va_arg(args, unsigned long);
545 v = va_arg(args, unsigned long long);
554 vs = va_arg(args, int);
557 vs = va_arg(args, long);
561 vs = va_arg(args, long long);
576 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
580 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
582 * Old OF requires we claim physical and virtual separately
583 * and then map explicitly (assuming virtual mode)
588 ret = call_prom_ret("call-method", 5, 2, &result,
589 ADDR("claim"), prom.memory,
591 if (ret != 0 || result == -1)
593 ret = call_prom_ret("call-method", 5, 2, &result,
594 ADDR("claim"), prom.mmumap,
597 call_prom("call-method", 4, 1, ADDR("release"),
598 prom.memory, size, virt);
601 /* the 0x12 is M (coherence) + PP == read/write */
602 call_prom("call-method", 6, 1,
603 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
606 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
610 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
	/* Do not call exit because it clears the screen on pmac;
	 * it also causes some sort of double-fault on early pmacs */
615 if (of_platform == PLATFORM_POWERMAC)
618 /* ToDo: should put up an SRC here on pSeries */
619 call_prom("exit", 0, 0);
621 for (;;) /* should never get here */
626 static int __init prom_next_node(phandle *nodep)
630 if ((node = *nodep) != 0
631 && (*nodep = call_prom("child", 1, 1, node)) != 0)
633 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
636 if ((node = call_prom("parent", 1, 1, node)) == 0)
638 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
static inline int __init prom_getprop(phandle node, const char *pname,
				      void *value, size_t valuelen)
{
	return call_prom("getprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}

static inline int __init prom_getproplen(phandle node, const char *pname)
{
	return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
655 static void add_string(char **str, const char *q)
665 static char *tohex(unsigned int x)
667 static const char digits[] __initconst = "0123456789abcdef";
668 static char result[9] __prombss;
675 result[i] = digits[x & 0xf];
677 } while (x != 0 && i > 0);
static int __init prom_setprop(phandle node, const char *nodename,
			       const char *pname, void *value, size_t valuelen)
{
	char cmd[256], *p;

	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
		return call_prom("setprop", 4, 1, node, ADDR(pname),
				 (u32)(unsigned long) value, (u32) valuelen);

	/* gah... setprop doesn't work on longtrail, have to use interpret */
	p = cmd;
	add_string(&p, "dev");
	add_string(&p, nodename);
	add_string(&p, tohex((u32)(unsigned long) value));
	add_string(&p, tohex(valuelen));
	add_string(&p, tohex(ADDR(pname)));
	add_string(&p, tohex(prom_strlen(pname)));
	add_string(&p, "property");
	*p = 0;
	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}
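
/*
 * For reference, the longtrail workaround above hands OF a Forth command
 * of the form (all values rendered in hex by tohex()):
 *
 *	dev <nodename> <value-addr> <valuelen> <pname-addr> <pname-len> property
 */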
/* We can't use the standard versions because of relocation headaches. */
#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
			 || ('a' <= (c) && (c) <= 'f') \
			 || ('A' <= (c) && (c) <= 'F'))

#define isdigit(c)	('0' <= (c) && (c) <= '9')
#define islower(c)	('a' <= (c) && (c) <= 'z')
#define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
712 static unsigned long prom_strtoul(const char *cp, const char **endp)
714 unsigned long result = 0, base = 10, value;
719 if (toupper(*cp) == 'X') {
725 while (isxdigit(*cp) &&
726 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
727 result = result * base + value;
737 static unsigned long prom_memparse(const char *ptr, const char **retptr)
739 unsigned long ret = prom_strtoul(ptr, retptr);
743 * We can't use a switch here because GCC *may* generate a
744 * jump table which won't work, because we're not running at
745 * the address we're linked at.
747 if ('G' == **retptr || 'g' == **retptr)
750 if ('M' == **retptr || 'm' == **retptr)
753 if ('K' == **retptr || 'k' == **retptr)
/*
 * Early parsing of the command line passed to the kernel, used for
 * "mem=x" and the options that affect the iommu
 */
768 static void __init early_cmdline_parse(void)
775 prom_cmd_line[0] = 0;
778 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
779 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
781 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
782 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
783 sizeof(prom_cmd_line));
785 prom_printf("command line: %s\n", prom_cmd_line);
788 opt = prom_strstr(prom_cmd_line, "iommu=");
790 prom_printf("iommu opt is: %s\n", opt);
792 while (*opt && *opt == ' ')
794 if (!prom_strncmp(opt, "off", 3))
796 else if (!prom_strncmp(opt, "force", 5))
797 prom_iommu_force_on = 1;
800 opt = prom_strstr(prom_cmd_line, "mem=");
803 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
805 /* Align to 16 MB == size of ppc64 large page */
806 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
810 #ifdef CONFIG_PPC_PSERIES
811 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
812 opt = prom_strstr(prom_cmd_line, "disable_radix");
815 if (*opt && *opt == '=') {
818 if (prom_strtobool(++opt, &val))
819 prom_radix_disable = false;
821 prom_radix_disable = val;
823 prom_radix_disable = true;
825 if (prom_radix_disable)
826 prom_debug("Radix disabled from cmdline\n");
828 opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
830 prom_radix_gtse_disable = true;
831 prom_debug("Radix GTSE disabled from cmdline\n");
834 opt = prom_strstr(prom_cmd_line, "xive=off");
836 prom_xive_disable = true;
837 prom_debug("XIVE disabled from cmdline\n");
839 #endif /* CONFIG_PPC_PSERIES */
841 #ifdef CONFIG_PPC_SVM
842 opt = prom_strstr(prom_cmd_line, "svm=");
846 opt += sizeof("svm=") - 1;
847 if (!prom_strtobool(opt, &val))
848 prom_svm_enable = val;
850 #endif /* CONFIG_PPC_SVM */
#ifdef CONFIG_PPC_PSERIES
/*
 * The architecture vector has an array of PVR mask/value pairs,
 * followed by # option vectors - 1, followed by the option vectors.
 *
 * See prom.h for the definition of the bits specified in the
 * architecture vector.
 */

/* Firmware expects the value to be n - 1, where n is the # of vectors */
#define NUM_VECTORS(n)		((n) - 1)

/*
 * Firmware expects 1 + n - 2, where n is the length of the option vector in
 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
 */
#define VECTOR_LENGTH(n)	(1 + (n) - 2)
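
/*
 * Worked example: if an option vector struct is 24 bytes long, the encoded
 * length byte below becomes VECTOR_LENGTH(24) = 1 + 24 - 2 = 23, i.e. the
 * vector size minus one.
 */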
871 struct option_vector1 {
877 struct option_vector2 {
891 struct option_vector3 {
896 struct option_vector4 {
901 struct option_vector5 {
913 u8 platform_facilities;
924 struct option_vector6 {
930 struct ibm_arch_vec {
931 struct { u32 mask, val; } pvrs[14];
936 struct option_vector1 vec1;
939 struct option_vector2 vec2;
942 struct option_vector3 vec3;
945 struct option_vector4 vec4;
948 struct option_vector5 vec5;
951 struct option_vector6 vec6;
954 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
957 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
958 .val = cpu_to_be32(0x003a0000),
961 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
962 .val = cpu_to_be32(0x003e0000),
965 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
966 .val = cpu_to_be32(0x003f0000),
969 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
970 .val = cpu_to_be32(0x004b0000),
973 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
974 .val = cpu_to_be32(0x004c0000),
977 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
978 .val = cpu_to_be32(0x004d0000),
981 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
982 .val = cpu_to_be32(0x004e0000),
985 .mask = cpu_to_be32(0xffff0000), /* POWER10 */
986 .val = cpu_to_be32(0x00800000),
989 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
990 .val = cpu_to_be32(0x0f000006),
993 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
994 .val = cpu_to_be32(0x0f000005),
997 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
998 .val = cpu_to_be32(0x0f000004),
1001 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1002 .val = cpu_to_be32(0x0f000003),
1005 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1006 .val = cpu_to_be32(0x0f000002),
1009 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1010 .val = cpu_to_be32(0x0f000001),
1014 .num_vectors = NUM_VECTORS(6),
1016 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1019 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1020 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1021 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1024 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1025 /* option vector 2: Open Firmware options supported */
1027 .byte1 = OV2_REAL_MODE,
1029 .real_base = cpu_to_be32(0xffffffff),
1030 .real_size = cpu_to_be32(0xffffffff),
1031 .virt_base = cpu_to_be32(0xffffffff),
1032 .virt_size = cpu_to_be32(0xffffffff),
1033 .load_base = cpu_to_be32(0xffffffff),
1034 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
1035 .min_load = cpu_to_be32(0xffffffff), /* full client load */
1036 .min_rma_percent = 0, /* min RMA percentage of total RAM */
1037 .max_pft_size = 48, /* max log_2(hash table size) */
1040 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1041 /* option vector 3: processor options supported */
1043 .byte1 = 0, /* don't ignore, don't halt */
1044 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1047 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1048 /* option vector 4: IBM PAPR implementation */
1050 .byte1 = 0, /* don't halt */
1051 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
1054 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1055 /* option vector 5: PAPR/OF options */
1057 .byte1 = 0, /* don't ignore, don't halt */
1058 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1059 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1060 #ifdef CONFIG_PCI_MSI
1061 /* PCIe/MSI support. Without MSI full PCIe is not supported */
1068 #ifdef CONFIG_PPC_SMLPAR
1069 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1073 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
1074 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1075 .micro_checkpoint = 0,
1077 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
1080 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1084 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1091 /* option vector 6: IBM PAPR hints */
1092 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1095 .secondary_pteg = 0,
1096 .os_name = OV6_LINUX,
1100 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
1102 /* Old method - ELF header with PT_NOTE sections only works on BE */
1103 #ifdef __BIG_ENDIAN__
1104 static const struct fake_elf {
1111 char name[8]; /* "PowerPC" */
1125 char name[24]; /* "IBM,RPA-Client-Config" */
1129 u32 min_rmo_percent;
1137 } fake_elf __initconst = {
1139 .e_ident = { 0x7f, 'E', 'L', 'F',
1140 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1141 .e_type = ET_EXEC, /* yeah right */
1142 .e_machine = EM_PPC,
1143 .e_version = EV_CURRENT,
1144 .e_phoff = offsetof(struct fake_elf, phdr),
1145 .e_phentsize = sizeof(Elf32_Phdr),
1151 .p_offset = offsetof(struct fake_elf, chrpnote),
1152 .p_filesz = sizeof(struct chrpnote)
1155 .p_offset = offsetof(struct fake_elf, rpanote),
1156 .p_filesz = sizeof(struct rpanote)
1160 .namesz = sizeof("PowerPC"),
1161 .descsz = sizeof(struct chrpdesc),
1165 .real_mode = ~0U, /* ~0 means "don't care" */
1174 .namesz = sizeof("IBM,RPA-Client-Config"),
1175 .descsz = sizeof(struct rpadesc),
1177 .name = "IBM,RPA-Client-Config",
1180 .min_rmo_size = 64, /* in megabytes */
1181 .min_rmo_percent = 0,
1182 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1189 #endif /* __BIG_ENDIAN__ */
1191 static int __init prom_count_smt_threads(void)
	/* Pick up the first CPU node we can find */
1198 for (node = 0; prom_next_node(&node); ) {
1200 prom_getprop(node, "device_type", type, sizeof(type));
1202 if (prom_strcmp(type, "cpu"))
1205 * There is an entry for each smt thread, each entry being
1206 * 4 bytes long. All cpus should have the same number of
1207 * smt threads, so return after finding the first.
1209 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1210 if (plen == PROM_ERROR)
1213 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1216 if (plen < 1 || plen > 64) {
1217 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1218 (unsigned long)plen);
1223 prom_debug("No threads found, assuming 1 per core\n");
1229 static void __init prom_parse_mmu_model(u8 val,
1230 struct platform_support *support)
1233 case OV5_FEAT(OV5_MMU_DYNAMIC):
1234 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1235 prom_debug("MMU - either supported\n");
1236 support->radix_mmu = !prom_radix_disable;
1237 support->hash_mmu = true;
1239 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1240 prom_debug("MMU - radix only\n");
1241 if (prom_radix_disable) {
1243 * If we __have__ to do radix, we're better off ignoring
1244 * the command line rather than not booting.
1246 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1248 support->radix_mmu = true;
1250 case OV5_FEAT(OV5_MMU_HASH):
1251 prom_debug("MMU - hash only\n");
1252 support->hash_mmu = true;
1255 prom_debug("Unknown mmu support option: 0x%x\n", val);
1260 static void __init prom_parse_xive_model(u8 val,
1261 struct platform_support *support)
1264 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1265 prom_debug("XIVE - either mode supported\n");
1266 support->xive = !prom_xive_disable;
1268 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1269 prom_debug("XIVE - exploitation mode supported\n");
1270 if (prom_xive_disable) {
1272 * If we __have__ to do XIVE, we're better off ignoring
1273 * the command line rather than not booting.
1275 prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1277 support->xive = true;
1279 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1280 prom_debug("XIVE - legacy mode supported\n");
1283 prom_debug("Unknown xive support option: 0x%x\n", val);
1288 static void __init prom_parse_platform_support(u8 index, u8 val,
1289 struct platform_support *support)
1292 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1293 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1295 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1296 if (val & OV5_FEAT(OV5_RADIX_GTSE))
1297 support->radix_gtse = !prom_radix_gtse_disable;
1299 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1300 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1306 static void __init prom_check_platform_support(void)
1308 struct platform_support supported = {
1311 .radix_gtse = false,
1314 int prop_len = prom_getproplen(prom.chosen,
1315 "ibm,arch-vec-5-platform-support");
1318 * First copy the architecture vec template
1320 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1321 * by __memcpy() when KASAN is active
1323 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1324 sizeof(ibm_architecture_vec));
1329 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1331 if (prop_len > sizeof(vec))
1332 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1334 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1335 for (i = 0; i < prop_len; i += 2) {
1336 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1337 prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1341 if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1342 /* Radix preferred - Check if GTSE is also supported */
1343 prom_debug("Asking for radix\n");
1344 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1345 if (supported.radix_gtse)
1346 ibm_architecture_vec.vec5.radix_ext =
1347 OV5_FEAT(OV5_RADIX_GTSE);
1349 prom_debug("Radix GTSE isn't supported\n");
1350 } else if (supported.hash_mmu) {
1351 /* Default to hash mmu (if we can) */
1352 prom_debug("Asking for hash\n");
1353 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1355 /* We're probably on a legacy hypervisor */
1356 prom_debug("Assuming legacy hash support\n");
1359 if (supported.xive) {
1360 prom_debug("Asking for XIVE\n");
1361 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1365 static void __init prom_send_capabilities(void)
1371 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1372 prom_check_platform_support();
1374 root = call_prom("open", 1, 1, ADDR("/"));
	/* We need to tell the FW about the number of cores we support.
	 *
	 * To do that, we count the number of threads on the first core
	 * (we assume this is the same for all cores) and use it to
	 * divide NR_CPUS.
	 */
	cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
	prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
		    cores, NR_CPUS);

	ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1389 /* try calling the ibm,client-architecture-support method */
1390 prom_printf("Calling ibm,client-architecture-support...");
1391 if (call_prom_ret("call-method", 3, 2, &ret,
1392 ADDR("ibm,client-architecture-support"),
1394 ADDR(&ibm_architecture_vec)) == 0) {
1395 /* the call exists... */
1397 prom_printf("\nWARNING: ibm,client-architecture"
1398 "-support call FAILED!\n");
1399 call_prom("close", 1, 0, root);
1400 prom_printf(" done\n");
1403 call_prom("close", 1, 0, root);
1404 prom_printf(" not implemented\n");
1407 #ifdef __BIG_ENDIAN__
1411 /* no ibm,client-architecture-support call, try the old way */
1412 elfloader = call_prom("open", 1, 1,
1413 ADDR("/packages/elf-loader"));
1414 if (elfloader == 0) {
1415 prom_printf("couldn't open /packages/elf-loader\n");
1418 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1419 elfloader, ADDR(&fake_elf));
1420 call_prom("close", 1, 0, elfloader);
1422 #endif /* __BIG_ENDIAN__ */
1424 #endif /* CONFIG_PPC_PSERIES */
/*
 * Memory allocation strategy... our layout is normally:
 *
 *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
 *  rare cases, initrd might end up being before the kernel though.
 *  We assume this won't override the final kernel at 0, we have no
 *  provision to handle that in this version, but it should hopefully
 *  never happen.
 *
 *  alloc_top is set to the top of RMO, eventually shrink down if the
 *  memory limit ("mem=") is set below it.
 *
 *  alloc_bottom is set to the top of kernel/initrd
 *
 *  from there, allocations are done this way : rtas is allocated
 *  topmost, and the device-tree is allocated from the bottom. We try
 *  to grow the device-tree allocation as we progress. If we can't,
 *  then we fail, we don't currently have a facility to restart
 *  elsewhere, but that shouldn't be necessary.
 *
 *  Note that calls to reserve_mem have to be done explicitly, memory
 *  allocated with either alloc_up or alloc_down isn't automatically
 *  reserved.
 */

/*
 * Allocates memory in the RMO upward from the kernel/initrd
 *
 * When align is 0, this is a special case, it means to allocate in place
 * at the current location of alloc_bottom or fail (that is basically
 * extending the previous allocation). Used for the device-tree flattening.
 */
1459 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1461 unsigned long base = alloc_bottom;
1462 unsigned long addr = 0;
1465 base = ALIGN(base, align);
1466 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1468 prom_panic("alloc_up() called with mem not initialized\n");
1471 base = ALIGN(alloc_bottom, align);
1473 base = alloc_bottom;
1475 for(; (base + size) <= alloc_top;
1476 base = ALIGN(base + 0x100000, align)) {
1477 prom_debug(" trying: 0x%lx\n\r", base);
1478 addr = (unsigned long)prom_claim(base, size, 0);
1479 if (addr != PROM_ERROR && addr != 0)
1487 alloc_bottom = addr + size;
1489 prom_debug(" -> %lx\n", addr);
1490 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1491 prom_debug(" alloc_top : %lx\n", alloc_top);
1492 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1493 prom_debug(" rmo_top : %lx\n", rmo_top);
1494 prom_debug(" ram_top : %lx\n", ram_top);
1500 * Allocates memory downward, either from top of RMO, or if highmem
1501 * is set, from the top of RAM. Note that this one doesn't handle
1502 * failures. It does claim memory if highmem is not set.
1504 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1507 unsigned long base, addr = 0;
1509 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1510 highmem ? "(high)" : "(low)");
1512 prom_panic("alloc_down() called with mem not initialized\n");
1515 /* Carve out storage for the TCE table. */
1516 addr = ALIGN_DOWN(alloc_top_high - size, align);
1517 if (addr <= alloc_bottom)
1519 /* Will we bump into the RMO ? If yes, check out that we
1520 * didn't overlap existing allocations there, if we did,
1521 * we are dead, we must be the first in town !
1523 if (addr < rmo_top) {
1524 /* Good, we are first */
1525 if (alloc_top == rmo_top)
1526 alloc_top = rmo_top = addr;
1530 alloc_top_high = addr;
1534 base = ALIGN_DOWN(alloc_top - size, align);
1535 for (; base > alloc_bottom;
1536 base = ALIGN_DOWN(base - 0x100000, align)) {
1537 prom_debug(" trying: 0x%lx\n\r", base);
1538 addr = (unsigned long)prom_claim(base, size, 0);
1539 if (addr != PROM_ERROR && addr != 0)
1548 prom_debug(" -> %lx\n", addr);
1549 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1550 prom_debug(" alloc_top : %lx\n", alloc_top);
1551 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1552 prom_debug(" rmo_top : %lx\n", rmo_top);
1553 prom_debug(" ram_top : %lx\n", ram_top);
1559 * Parse a "reg" cell
1561 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1564 unsigned long r = 0;
1566 /* Ignore more than 2 cells */
1567 while (s > sizeof(unsigned long) / 4) {
1571 r = be32_to_cpu(*p++);
1575 r |= be32_to_cpu(*(p++));
1583 * Very dumb function for adding to the memory reserve list, but
1584 * we don't need anything smarter at this point
1586 * XXX Eventually check for collisions. They should NEVER happen.
1587 * If problems seem to show up, it would be a good start to track
1590 static void __init reserve_mem(u64 base, u64 size)
1592 u64 top = base + size;
1593 unsigned long cnt = mem_reserve_cnt;
1598 /* We need to always keep one empty entry so that we
1599 * have our terminator with "size" set to 0 since we are
1600 * dumb and just copy this entire array to the boot params
1602 base = ALIGN_DOWN(base, PAGE_SIZE);
1603 top = ALIGN(top, PAGE_SIZE);
1606 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1607 prom_panic("Memory reserve map exhausted !\n");
1608 mem_reserve_map[cnt].base = cpu_to_be64(base);
1609 mem_reserve_map[cnt].size = cpu_to_be64(size);
1610 mem_reserve_cnt = cnt + 1;
/*
 * Initialize memory allocation mechanism, parse "memory" nodes and
 * obtain that way the top of memory and RMO to set up our local allocator
 */
1617 static void __init prom_init_mem(void)
1627 * We iterate the memory nodes to find
1628 * 1) top of RMO (first node)
1631 val = cpu_to_be32(2);
1632 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1633 rac = be32_to_cpu(val);
1634 val = cpu_to_be32(1);
1635 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1636 rsc = be32_to_cpu(val);
1637 prom_debug("root_addr_cells: %x\n", rac);
1638 prom_debug("root_size_cells: %x\n", rsc);
1640 prom_debug("scanning memory:\n");
1642 for (node = 0; prom_next_node(&node); ) {
1644 prom_getprop(node, "device_type", type, sizeof(type));
1648 * CHRP Longtrail machines have no device_type
1649 * on the memory node, so check the name instead...
1651 prom_getprop(node, "name", type, sizeof(type));
1653 if (prom_strcmp(type, "memory"))
1656 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1657 if (plen > sizeof(regbuf)) {
1658 prom_printf("memory node too large for buffer !\n");
1659 plen = sizeof(regbuf);
1662 endp = p + (plen / sizeof(cell_t));
1665 memset(prom_scratch, 0, sizeof(prom_scratch));
1666 call_prom("package-to-path", 3, 1, node, prom_scratch,
1667 sizeof(prom_scratch) - 1);
1668 prom_debug(" node %s :\n", prom_scratch);
1669 #endif /* DEBUG_PROM */
1671 while ((endp - p) >= (rac + rsc)) {
1672 unsigned long base, size;
1674 base = prom_next_cell(rac, &p);
1675 size = prom_next_cell(rsc, &p);
1679 prom_debug(" %lx %lx\n", base, size);
1680 if (base == 0 && (of_platform & PLATFORM_LPAR))
1682 if ((base + size) > ram_top)
1683 ram_top = base + size;
1687 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1690 * If prom_memory_limit is set we reduce the upper limits *except* for
1691 * alloc_top_high. This must be the real top of RAM so we can put
1695 alloc_top_high = ram_top;
1697 if (prom_memory_limit) {
1698 if (prom_memory_limit <= alloc_bottom) {
1699 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1701 prom_memory_limit = 0;
1702 } else if (prom_memory_limit >= ram_top) {
1703 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1705 prom_memory_limit = 0;
1707 ram_top = prom_memory_limit;
1708 rmo_top = min(rmo_top, prom_memory_limit);
1713 * Setup our top alloc point, that is top of RMO or top of
1714 * segment 0 when running non-LPAR.
1715 * Some RS64 machines have buggy firmware where claims up at
1716 * 1GB fail. Cap at 768MB as a workaround.
1717 * Since 768MB is plenty of room, and we need to cap to something
1718 * reasonable on 32-bit, cap at 768MB on all machines.
1722 rmo_top = min(0x30000000ul, rmo_top);
1723 alloc_top = rmo_top;
1724 alloc_top_high = ram_top;
1727 * Check if we have an initrd after the kernel but still inside
1728 * the RMO. If we do move our bottom point to after it.
1730 if (prom_initrd_start &&
1731 prom_initrd_start < rmo_top &&
1732 prom_initrd_end > alloc_bottom)
1733 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1735 prom_printf("memory layout at init:\n");
1736 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1738 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1739 prom_printf(" alloc_top : %lx\n", alloc_top);
1740 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1741 prom_printf(" rmo_top : %lx\n", rmo_top);
1742 prom_printf(" ram_top : %lx\n", ram_top);
1745 static void __init prom_close_stdin(void)
1750 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1751 stdin = be32_to_cpu(val);
1752 call_prom("close", 1, 0, stdin);
1756 #ifdef CONFIG_PPC_SVM
1757 static int prom_rtas_hcall(uint64_t args)
1759 register uint64_t arg1 asm("r3") = H_RTAS;
1760 register uint64_t arg2 asm("r4") = args;
1762 asm volatile("sc 1\n" : "=r" (arg1) :
1768 static struct rtas_args __prombss os_term_args;
1770 static void __init prom_rtas_os_term(char *str)
1776 prom_debug("%s: start...\n", __func__);
1777 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1778 prom_debug("rtas_node: %x\n", rtas_node);
1779 if (!PHANDLE_VALID(rtas_node))
1783 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1784 token = be32_to_cpu(val);
1785 prom_debug("ibm,os-term: %x\n", token);
1787 prom_panic("Could not get token for ibm,os-term\n");
1788 os_term_args.token = cpu_to_be32(token);
1789 os_term_args.nargs = cpu_to_be32(1);
1790 os_term_args.nret = cpu_to_be32(1);
1791 os_term_args.args[0] = cpu_to_be32(__pa(str));
1792 prom_rtas_hcall((uint64_t)&os_term_args);
1794 #endif /* CONFIG_PPC_SVM */
1797 * Allocate room for and instantiate RTAS
1799 static void __init prom_instantiate_rtas(void)
1803 u32 base, entry = 0;
1807 prom_debug("prom_instantiate_rtas: start...\n");
1809 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1810 prom_debug("rtas_node: %x\n", rtas_node);
1811 if (!PHANDLE_VALID(rtas_node))
1815 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1816 size = be32_to_cpu(val);
1820 base = alloc_down(size, PAGE_SIZE, 0);
1822 prom_panic("Could not allocate memory for RTAS\n");
1824 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1825 if (!IHANDLE_VALID(rtas_inst)) {
1826 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1830 prom_printf("instantiating rtas at 0x%x...", base);
1832 if (call_prom_ret("call-method", 3, 2, &entry,
1833 ADDR("instantiate-rtas"),
1834 rtas_inst, base) != 0
1836 prom_printf(" failed\n");
1839 prom_printf(" done\n");
1841 reserve_mem(base, size);
1843 val = cpu_to_be32(base);
1844 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1846 val = cpu_to_be32(entry);
1847 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1850 /* Check if it supports "query-cpu-stopped-state" */
1851 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1852 &val, sizeof(val)) != PROM_ERROR)
1853 rtas_has_query_cpu_stopped = true;
1855 prom_debug("rtas base = 0x%x\n", base);
1856 prom_debug("rtas entry = 0x%x\n", entry);
1857 prom_debug("rtas size = 0x%x\n", size);
1859 prom_debug("prom_instantiate_rtas: end...\n");
1864 * Allocate room for and instantiate Stored Measurement Log (SML)
1866 static void __init prom_instantiate_sml(void)
1868 phandle ibmvtpm_node;
1869 ihandle ibmvtpm_inst;
1870 u32 entry = 0, size = 0, succ = 0;
1874 prom_debug("prom_instantiate_sml: start...\n");
1876 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1877 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1878 if (!PHANDLE_VALID(ibmvtpm_node))
1881 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1882 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1883 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1887 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1888 &val, sizeof(val)) != PROM_ERROR) {
1889 if (call_prom_ret("call-method", 2, 2, &succ,
1890 ADDR("reformat-sml-to-efi-alignment"),
1891 ibmvtpm_inst) != 0 || succ == 0) {
1892 prom_printf("Reformat SML to EFI alignment failed\n");
1896 if (call_prom_ret("call-method", 2, 2, &size,
1897 ADDR("sml-get-allocated-size"),
1898 ibmvtpm_inst) != 0 || size == 0) {
1899 prom_printf("SML get allocated size failed\n");
1903 if (call_prom_ret("call-method", 2, 2, &size,
1904 ADDR("sml-get-handover-size"),
1905 ibmvtpm_inst) != 0 || size == 0) {
1906 prom_printf("SML get handover size failed\n");
1911 base = alloc_down(size, PAGE_SIZE, 0);
1913 prom_panic("Could not allocate memory for sml\n");
1915 prom_printf("instantiating sml at 0x%llx...", base);
1917 memset((void *)base, 0, size);
1919 if (call_prom_ret("call-method", 4, 2, &entry,
1920 ADDR("sml-handover"),
1921 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1922 prom_printf("SML handover failed\n");
1925 prom_printf(" done\n");
1927 reserve_mem(base, size);
1929 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1930 &base, sizeof(base));
1931 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1932 &size, sizeof(size));
1934 prom_debug("sml base = 0x%llx\n", base);
1935 prom_debug("sml size = 0x%x\n", size);
1937 prom_debug("prom_instantiate_sml: end...\n");
1941 * Allocate room for and initialize TCE tables
1943 #ifdef __BIG_ENDIAN__
1944 static void __init prom_initialize_tce_table(void)
1948 char compatible[64], type[64], model[64];
1949 char *path = prom_scratch;
1951 u32 minalign, minsize;
1952 u64 tce_entry, *tce_entryp;
1953 u64 local_alloc_top, local_alloc_bottom;
1959 prom_debug("starting prom_initialize_tce_table\n");
1961 /* Cache current top of allocs so we reserve a single block */
1962 local_alloc_top = alloc_top_high;
1963 local_alloc_bottom = local_alloc_top;
1965 /* Search all nodes looking for PHBs. */
1966 for (node = 0; prom_next_node(&node); ) {
1970 prom_getprop(node, "compatible",
1971 compatible, sizeof(compatible));
1972 prom_getprop(node, "device_type", type, sizeof(type));
1973 prom_getprop(node, "model", model, sizeof(model));
1975 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
1978 /* Keep the old logic intact to avoid regression. */
1979 if (compatible[0] != 0) {
1980 if ((prom_strstr(compatible, "python") == NULL) &&
1981 (prom_strstr(compatible, "Speedwagon") == NULL) &&
1982 (prom_strstr(compatible, "Winnipeg") == NULL))
1984 } else if (model[0] != 0) {
1985 if ((prom_strstr(model, "ython") == NULL) &&
1986 (prom_strstr(model, "peedwagon") == NULL) &&
1987 (prom_strstr(model, "innipeg") == NULL))
1991 if (prom_getprop(node, "tce-table-minalign", &minalign,
1992 sizeof(minalign)) == PROM_ERROR)
1994 if (prom_getprop(node, "tce-table-minsize", &minsize,
1995 sizeof(minsize)) == PROM_ERROR)
1996 minsize = 4UL << 20;
1999 * Even though we read what OF wants, we just set the table
2000 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
2001 * By doing this, we avoid the pitfalls of trying to DMA to
2002 * MMIO space and the DMA alias hole.
2004 minsize = 4UL << 20;
2006 /* Align to the greater of the align or size */
2007 align = max(minalign, minsize);
2008 base = alloc_down(minsize, align, 1);
2010 prom_panic("ERROR, cannot find space for TCE table.\n");
2011 if (base < local_alloc_bottom)
2012 local_alloc_bottom = base;
2014 /* It seems OF doesn't null-terminate the path :-( */
2015 memset(path, 0, sizeof(prom_scratch));
2016 /* Call OF to setup the TCE hardware */
2017 if (call_prom("package-to-path", 3, 1, node,
2018 path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2019 prom_printf("package-to-path failed\n");
2022 /* Save away the TCE table attributes for later use. */
2023 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2024 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2026 prom_debug("TCE table: %s\n", path);
2027 prom_debug("\tnode = 0x%x\n", node);
2028 prom_debug("\tbase = 0x%llx\n", base);
2029 prom_debug("\tsize = 0x%x\n", minsize);
2031 /* Initialize the table to have a one-to-one mapping
2032 * over the allocated size.
2034 tce_entryp = (u64 *)base;
2035 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
2036 tce_entry = (i << PAGE_SHIFT);
2038 *tce_entryp = tce_entry;
2041 prom_printf("opening PHB %s", path);
2042 phb_node = call_prom("open", 1, 1, path);
2044 prom_printf("... failed\n");
2046 prom_printf("... done\n");
2048 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2049 phb_node, -1, minsize,
2050 (u32) base, (u32) (base >> 32));
2051 call_prom("close", 1, 0, phb_node);
2054 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2056 /* These are only really needed if there is a memory limit in
2057 * effect, but we don't know so export them always. */
2058 prom_tce_alloc_start = local_alloc_bottom;
2059 prom_tce_alloc_end = local_alloc_top;
2061 /* Flag the first invalid entry */
2062 prom_debug("ending prom_initialize_tce_table\n");
2064 #endif /* __BIG_ENDIAN__ */
2065 #endif /* CONFIG_PPC64 */
/*
 * With CHRP SMP we need to use the OF to start the other processors.
 * We can't wait until smp_boot_cpus (the OF is trashed by then)
 * so we have to put the processors into a holding pattern controlled
 * by the kernel (not OF) before we destroy the OF.
 *
 * This uses a chunk of low memory, puts some holding pattern
 * code there and sends the other processors off to there until
 * smp_boot_cpus tells them to do something. The holding pattern
 * checks that address until its cpu # is there, when it is that
 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
 * of setting those values.
 *
 * We also use physical address 0x4 here to tell when a cpu
 * is in its holding pattern code.
 */

/*
 * We want to reference the copy of __secondary_hold_* in the
 * 0 - 0x100 address range
 */
#define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
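
/*
 * For example, LOW_ADDR(__secondary_hold_spinloop) yields the address of
 * the copy of that variable in the 0 - 0x100 range, which is what the held
 * secondary CPUs poll below.
 */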
2091 static void __init prom_hold_cpus(void)
2096 unsigned long *spinloop
2097 = (void *) LOW_ADDR(__secondary_hold_spinloop);
2098 unsigned long *acknowledge
2099 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
2100 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2103 * On pseries, if RTAS supports "query-cpu-stopped-state",
2104 * we skip this stage, the CPUs will be started by the
2105 * kernel using RTAS.
2107 if ((of_platform == PLATFORM_PSERIES ||
2108 of_platform == PLATFORM_PSERIES_LPAR) &&
2109 rtas_has_query_cpu_stopped) {
2110 prom_printf("prom_hold_cpus: skipped\n");
2114 prom_debug("prom_hold_cpus: start...\n");
2115 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
2116 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2117 prom_debug(" 1) acknowledge = 0x%lx\n",
2118 (unsigned long)acknowledge);
2119 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2120 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
2122 /* Set the common spinloop variable, so all of the secondary cpus
2123 * will block when they are awakened from their OF spinloop.
2124 * This must occur for both SMP and non SMP kernels, since OF will
2125 * be trashed when we move the kernel.
2130 for (node = 0; prom_next_node(&node); ) {
2131 unsigned int cpu_no;
2135 prom_getprop(node, "device_type", type, sizeof(type));
2136 if (prom_strcmp(type, "cpu") != 0)
2139 /* Skip non-configured cpus. */
2140 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2141 if (prom_strcmp(type, "okay") != 0)
2144 reg = cpu_to_be32(-1); /* make sparse happy */
2145 prom_getprop(node, "reg", ®, sizeof(reg));
2146 cpu_no = be32_to_cpu(reg);
2148 prom_debug("cpu hw idx = %u\n", cpu_no);
2150 /* Init the acknowledge var which will be reset by
2151 * the secondary cpu when it awakens from its OF
2154 *acknowledge = (unsigned long)-1;
2156 if (cpu_no != prom.cpu) {
2157 /* Primary Thread of non-boot cpu or any thread */
2158 prom_printf("starting cpu hw idx %u... ", cpu_no);
2159 call_prom("start-cpu", 3, 0, node,
2160 secondary_hold, cpu_no);
2162 for (i = 0; (i < 100000000) &&
2163 (*acknowledge == ((unsigned long)-1)); i++ )
2166 if (*acknowledge == cpu_no)
2167 prom_printf("done\n");
2169 prom_printf("failed: %lx\n", *acknowledge);
2173 prom_printf("boot cpu hw idx %u\n", cpu_no);
2174 #endif /* CONFIG_SMP */
2177 prom_debug("prom_hold_cpus: end...\n");
2181 static void __init prom_init_client_services(unsigned long pp)
2183 /* Get a handle to the prom entry point before anything else */
2186 /* get a handle for the stdout device */
2187 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2188 if (!PHANDLE_VALID(prom.chosen))
2189 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2191 /* get device tree root */
2192 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2193 if (!PHANDLE_VALID(prom.root))
2194 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2201 * For really old powermacs, we need to map things we claim.
2202 * For that, we need the ihandle of the mmu.
2203 * Also, on the longtrail, we need to work around other bugs.
2205 static void __init prom_find_mmu(void)
2210 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2211 if (!PHANDLE_VALID(oprom))
2213 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2215 version[sizeof(version) - 1] = 0;
2216 /* XXX might need to add other versions here */
2217 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2218 of_workarounds = OF_WA_CLAIM;
2219 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2220 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2221 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2224 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2225 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2226 sizeof(prom.mmumap));
2227 prom.mmumap = be32_to_cpu(prom.mmumap);
2228 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2229 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2232 #define prom_find_mmu()
2235 static void __init prom_init_stdout(void)
2237 char *path = of_stdout_device;
2239 phandle stdout_node;
2242 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2243 prom_panic("cannot find stdout");
2245 prom.stdout = be32_to_cpu(val);
2247 /* Get the full OF pathname of the stdout device */
2248 memset(path, 0, 256);
2249 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2250 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2251 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2252 path, prom_strlen(path) + 1);
2254 /* instance-to-package fails on PA-Semi */
2255 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2256 if (stdout_node != PROM_ERROR) {
2257 val = cpu_to_be32(stdout_node);
2259 /* If it's a display, note it */
2260 memset(type, 0, sizeof(type));
2261 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2262 if (prom_strcmp(type, "display") == 0)
2263 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2267 static int __init prom_find_machine_type(void)
2276 /* Look for a PowerMac or a Cell */
2277 len = prom_getprop(prom.root, "compatible",
2278 compat, sizeof(compat)-1);
2282 char *p = &compat[i];
2283 int sl = prom_strlen(p);
2286 if (prom_strstr(p, "Power Macintosh") ||
2287 prom_strstr(p, "MacRISC"))
2288 return PLATFORM_POWERMAC;
2290 /* We must make sure we don't detect the IBM Cell
2291 * blades as pSeries due to some firmware issues,
2294 if (prom_strstr(p, "IBM,CBEA") ||
2295 prom_strstr(p, "IBM,CPBW-1.0"))
2296 return PLATFORM_GENERIC;
2297 #endif /* CONFIG_PPC64 */
2302 /* Try to figure out if it's an IBM pSeries or any other
2303 * PAPR compliant platform. We assume it is if :
2304 * - /device_type is "chrp" (please, do NOT use that for future
2308 len = prom_getprop(prom.root, "device_type",
2309 compat, sizeof(compat)-1);
2311 return PLATFORM_GENERIC;
2312 if (prom_strcmp(compat, "chrp"))
2313 return PLATFORM_GENERIC;
2315 /* Default to pSeries. We need to know if we are running LPAR */
2316 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2317 if (!PHANDLE_VALID(rtas))
2318 return PLATFORM_GENERIC;
2319 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2320 if (x != PROM_ERROR) {
2321 prom_debug("Hypertas detected, assuming LPAR !\n");
2322 return PLATFORM_PSERIES_LPAR;
2324 return PLATFORM_PSERIES;
2326 return PLATFORM_GENERIC;
2330 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2332 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
/*
 * If we have a display that we don't know how to drive,
 * we will want to try to execute OF's open method for it
 * later. However, OF will probably fall over if we do that
 * after we've taken over the MMU.
 * So we check whether we will need to open the display,
 * and if so, open it now.
 */
2343 static void __init prom_check_displays(void)
2345 char type[16], *path;
2350 static const unsigned char default_colors[] __initconst = {
2368 const unsigned char *clut;
2370 prom_debug("Looking for displays\n");
2371 for (node = 0; prom_next_node(&node); ) {
2372 memset(type, 0, sizeof(type));
2373 prom_getprop(node, "device_type", type, sizeof(type));
2374 if (prom_strcmp(type, "display") != 0)
2377 /* It seems OF doesn't null-terminate the path :-( */
2378 path = prom_scratch;
2379 memset(path, 0, sizeof(prom_scratch));
2382 * leave some room at the end of the path for appending extra
2385 if (call_prom("package-to-path", 3, 1, node, path,
2386 sizeof(prom_scratch) - 10) == PROM_ERROR)
2388 prom_printf("found display : %s, opening... ", path);
2390 ih = call_prom("open", 1, 1, path);
2392 prom_printf("failed\n");
2397 prom_printf("done\n");
2398 prom_setprop(node, path, "linux,opened", NULL, 0);
2400 /* Setup a usable color table when the appropriate
2401 * method is available. Should update this to set-colors */
2402 clut = default_colors;
2403 for (i = 0; i < 16; i++, clut += 3)
2404 if (prom_set_color(ih, i, clut[0], clut[1],
2408 #ifdef CONFIG_LOGO_LINUX_CLUT224
2409 clut = PTRRELOC(logo_linux_clut224.clut);
2410 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2411 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2414 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2416 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2417 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2419 u32 width, height, pitch, addr;
2421 prom_printf("Setting btext !\n");
2423 if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2426 if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2429 if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2432 if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2435 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2436 width, height, pitch, addr);
2437 btext_setup_display(width, height, 8, pitch, addr);
2438 btext_prepare_BAT();
2440 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2445 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2446 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2447 unsigned long needed, unsigned long align)
2451 *mem_start = ALIGN(*mem_start, align);
2452 while ((*mem_start + needed) > *mem_end) {
2453 unsigned long room, chunk;
2455 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2457 room = alloc_top - alloc_bottom;
2458 if (room > DEVTREE_CHUNK_SIZE)
2459 room = DEVTREE_CHUNK_SIZE;
2460 if (room < PAGE_SIZE)
2461 prom_panic("No memory for flatten_device_tree "
2463 chunk = alloc_up(room, 0);
2465 prom_panic("No memory for flatten_device_tree "
2466 "(claim failed)\n");
2467 *mem_end = chunk + room;
2470 ret = (void *)*mem_start;
2471 *mem_start += needed;
2476 #define dt_push_token(token, mem_start, mem_end) do { \
2477 void *room = make_room(mem_start, mem_end, 4, 4); \
2478 *(__be32 *)room = cpu_to_be32(token); \
2481 static unsigned long __init dt_find_string(char *str)
2485 s = os = (char *)dt_string_start;
2487 while (s < (char *)dt_string_end) {
2488 if (prom_strcmp(s, str) == 0)
2490 s += prom_strlen(s) + 1;
/*
 * The Open Firmware 1275 specification states properties must be 31 bytes or
 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
 */
#define MAX_PROPERTY_NAME 64
2501 static void __init scan_dt_build_strings(phandle node,
2502 unsigned long *mem_start,
2503 unsigned long *mem_end)
2505 char *prev_name, *namep, *sstart;
2509 sstart = (char *)dt_string_start;
2511 /* get and store all property names */
2514 /* 64 is max len of name including nul. */
2515 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2516 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2517 /* No more nodes: unwind alloc */
2518 *mem_start = (unsigned long)namep;
2523 if (prom_strcmp(namep, "name") == 0) {
2524 *mem_start = (unsigned long)namep;
2528 /* get/create string entry */
2529 soff = dt_find_string(namep);
2531 *mem_start = (unsigned long)namep;
2532 namep = sstart + soff;
2534 /* Trim off some if we can */
2535 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2536 dt_string_end = *mem_start;
2541 /* do all our children */
2542 child = call_prom("child", 1, 1, node);
2543 while (child != 0) {
2544 scan_dt_build_strings(child, mem_start, mem_end);
2545 child = call_prom("peer", 1, 1, child);
2549 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2550 unsigned long *mem_end)
2553 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2555 unsigned char *valp;
2556 static char pname[MAX_PROPERTY_NAME] __prombss;
2557 int l, room, has_phandle = 0;
2559 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2561 /* get the node's full name */
2562 namep = (char *)*mem_start;
2563 room = *mem_end - *mem_start;
2566 l = call_prom("package-to-path", 3, 1, node, namep, room);
2568 /* Didn't fit? Get more room. */
2570 if (l >= *mem_end - *mem_start)
2571 namep = make_room(mem_start, mem_end, l+1, 1);
2572 call_prom("package-to-path", 3, 1, node, namep, l);
2576 /* Fixup an Apple bug where they have bogus \0 chars in the
2577 * middle of the path in some properties, and extract
2578 * the unit name (everything after the last '/').
2580 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2587 *mem_start = ALIGN((unsigned long)lp + 1, 4);
2590 /* get it again for debugging */
2591 path = prom_scratch;
2592 memset(path, 0, sizeof(prom_scratch));
2593 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2595 /* get and store all properties */
2597 sstart = (char *)dt_string_start;
2599 if (call_prom("nextprop", 3, 1, node, prev_name,
2604 if (prom_strcmp(pname, "name") == 0) {
2609 /* find string offset */
2610 soff = dt_find_string(pname);
if (soff == 0) {
2612 prom_printf("WARNING: Can't find string index for"
2613 " <%s>, node %s\n", pname, path);
break;
}
2616 prev_name = sstart + soff;
2619 l = call_prom("getproplen", 2, 1, node, pname);
2622 if (l == PROM_ERROR)
continue;
2625 /* push property head */
2626 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2627 dt_push_token(l, mem_start, mem_end);
2628 dt_push_token(soff, mem_start, mem_end);
2630 /* push property content */
2631 valp = make_room(mem_start, mem_end, l, 4);
2632 call_prom("getprop", 4, 1, node, pname, valp, l);
2633 *mem_start = ALIGN(*mem_start, 4);
2635 if (!prom_strcmp(pname, "phandle"))
has_phandle = 1;

2639 /* Add a "phandle" property if none already exists */
if (!has_phandle) {
2641 soff = dt_find_string("phandle");
if (soff == 0)
2643 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
else {
2645 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2646 dt_push_token(4, mem_start, mem_end);
2647 dt_push_token(soff, mem_start, mem_end);
2648 valp = make_room(mem_start, mem_end, 4, 4);
2649 *(__be32 *)valp = cpu_to_be32(node);
*mem_start = ALIGN(*mem_start, 4);
}
}
2653 /* do all our children */
2654 child = call_prom("child", 1, 1, node);
2655 while (child != 0) {
2656 scan_dt_build_struct(child, mem_start, mem_end);
2657 child = call_prom("peer", 1, 1, child);
2660 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
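/*
 * Illustration (made up, not from a real machine) of what one node with a
 * single property and no firmware-provided "phandle" turns into:
 *
 *	OF_DT_BEGIN_NODE  "pci@f2000000\0"  (name padded to 4 bytes)
 *	OF_DT_PROP  <len> <nameoff of "device_type">  "pci\0"
 *	OF_DT_PROP  4     <nameoff of "phandle">      <OF package handle>
 *	...child nodes, each bracketed the same way...
 *	OF_DT_END_NODE
 */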
2663 static void __init flatten_device_tree(void)
2666 unsigned long mem_start, mem_end, room;
2667 struct boot_param_header *hdr;
2672 * Check how much room we have between alloc top & bottom (+/- a
2673 * few pages), crop to 1MB, as this is our "chunk" size
2675 room = alloc_top - alloc_bottom - 0x4000;
2676 if (room > DEVTREE_CHUNK_SIZE)
2677 room = DEVTREE_CHUNK_SIZE;
2678 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2680 /* Now try to claim that */
2681 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
if (mem_start == 0)
2683 prom_panic("Can't allocate initial device-tree chunk\n");
2684 mem_end = mem_start + room;
2686 /* Get root of tree */
2687 root = call_prom("peer", 1, 1, (phandle)0);
2688 if (root == (phandle)0)
2689 prom_panic ("couldn't get device tree root\n");
2691 /* Build header and make room for mem rsv map */
2692 mem_start = ALIGN(mem_start, 4);
2693 hdr = make_room(&mem_start, &mem_end,
2694 sizeof(struct boot_param_header), 4);
2695 dt_header_start = (unsigned long)hdr;
2696 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2698 /* Start of strings */
2699 mem_start = PAGE_ALIGN(mem_start);
2700 dt_string_start = mem_start;
2701 mem_start += 4; /* hole */
2703 /* Add "phandle" in there, we'll need it */
2704 namep = make_room(&mem_start, &mem_end, 16, 1);
2705 prom_strcpy(namep, "phandle");
2706 mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2708 /* Build string array */
2709 prom_printf("Building dt strings...\n");
2710 scan_dt_build_strings(root, &mem_start, &mem_end);
2711 dt_string_end = mem_start;
2713 /* Build structure */
2714 mem_start = PAGE_ALIGN(mem_start);
2715 dt_struct_start = mem_start;
2716 prom_printf("Building dt structure...\n");
2717 scan_dt_build_struct(root, &mem_start, &mem_end);
2718 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2719 dt_struct_end = PAGE_ALIGN(mem_start);
2722 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2723 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2724 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2725 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2726 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2727 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2728 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2729 hdr->version = cpu_to_be32(OF_DT_VERSION);
2730 /* Version 16 is not backward compatible */
2731 hdr->last_comp_version = cpu_to_be32(0x10);
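/*
 * At this point the blob that has been laid out since dt_header_start looks
 * roughly like this, which is exactly what the offsets written above say:
 *
 *	dt_header_start   struct boot_param_header
 *	off_mem_rsvmap    reserve map (copied in below)
 *	dt_string_start   property name strings
 *	dt_struct_start   OF_DT_BEGIN_NODE ... OF_DT_END_NODE, OF_DT_END
 *	dt_struct_end     totalsize = dt_struct_end - dt_header_start
 */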
2733 /* Copy the reserve map in */
2734 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2739 prom_printf("reserved memory map:\n");
2740 for (i = 0; i < mem_reserve_cnt; i++)
2741 prom_printf(" %llx - %llx\n",
2742 be64_to_cpu(mem_reserve_map[i].base),
2743 be64_to_cpu(mem_reserve_map[i].size));
2746 /* Bump mem_reserve_cnt to cause further reservations to fail
2747 * since it's too late.
2749 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2751 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2752 dt_string_start, dt_string_end);
2753 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2754 dt_struct_start, dt_struct_end);
2757 #ifdef CONFIG_PPC_MAPLE
2758 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2759 * The values are bad, and it doesn't even have the right number of cells. */
2760 static void __init fixup_device_tree_maple(void)
2763 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2767 name = "/ht@0/isa@4";
2768 isa = call_prom("finddevice", 1, 1, ADDR(name));
2769 if (!PHANDLE_VALID(isa)) {
2770 name = "/ht@0/isa@6";
2771 isa = call_prom("finddevice", 1, 1, ADDR(name));
2772 rloc = 0x01003000; /* IO space; PCI device = 6 */
2774 if (!PHANDLE_VALID(isa))
2777 if (prom_getproplen(isa, "ranges") != 12)
return;
2779 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
== PROM_ERROR)
return;

2783 if (isa_ranges[0] != 0x1 ||
2784 isa_ranges[1] != 0xf4000000 ||
2785 isa_ranges[2] != 0x00010000)
return;
2788 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2790 isa_ranges[0] = 0x1;
2791 isa_ranges[1] = 0x0;
2792 isa_ranges[2] = rloc;
2793 isa_ranges[3] = 0x0;
2794 isa_ranges[4] = 0x0;
2795 isa_ranges[5] = 0x00010000;
2796 prom_setprop(isa, name, "ranges",
2797 isa_ranges, sizeof(isa_ranges));
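/*
 * For reference, the six cells written above form one ISA "ranges" entry:
 * a 2-cell ISA child address (I/O space, offset 0), a 3-cell PCI parent
 * address whose phys.hi is rloc (I/O space on the bridge's PCI device), and
 * a 1-cell size of 0x10000, i.e. the 64kB legacy I/O window.
 */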
2800 #define CPC925_MC_START 0xf8000000
2801 #define CPC925_MC_LENGTH 0x1000000
2802 /* The values for memory-controller don't have the right number of cells */
2803 static void __init fixup_device_tree_maple_memory_controller(void)
2807 char *name = "/hostbridge@f8000000";
2810 mc = call_prom("finddevice", 1, 1, ADDR(name));
2811 if (!PHANDLE_VALID(mc))
2814 if (prom_getproplen(mc, "reg") != 8)
2817 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2818 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2819 if ((ac != 2) || (sc != 2))
2822 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2825 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2828 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2831 mc_reg[1] = CPC925_MC_START;
2833 mc_reg[3] = CPC925_MC_LENGTH;
2834 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2837 #define fixup_device_tree_maple()
2838 #define fixup_device_tree_maple_memory_controller()
2841 #ifdef CONFIG_PPC_CHRP
2843 * Pegasos and BriQ lack the "ranges" property in the isa node
2844 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2845 * Pegasos has the IDE configured in legacy mode, but advertised as native
2847 static void __init fixup_device_tree_chrp(void)
2851 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2855 name = "/pci@80000000/isa@c";
2856 ph = call_prom("finddevice", 1, 1, ADDR(name));
2857 if (!PHANDLE_VALID(ph)) {
2858 name = "/pci@ff500000/isa@6";
2859 ph = call_prom("finddevice", 1, 1, ADDR(name));
2860 rloc = 0x01003000; /* IO space; PCI device = 6 */
2862 if (PHANDLE_VALID(ph)) {
2863 rc = prom_getproplen(ph, "ranges");
2864 if (rc == 0 || rc == PROM_ERROR) {
2865 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2872 prop[5] = 0x00010000;
2873 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2877 name = "/pci@80000000/ide@C,1";
2878 ph = call_prom("finddevice", 1, 1, ADDR(name));
2879 if (PHANDLE_VALID(ph)) {
2880 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2883 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2884 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2885 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2886 if (rc == sizeof(u32)) {
prop[0] &= ~0x5;
2888 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
}
2893 #define fixup_device_tree_chrp()
2896 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2897 static void __init fixup_device_tree_pmac(void)
2899 phandle u3, i2c, mpic;
2904 /* Some G5s have a missing interrupt definition, fix it up here */
2905 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2906 if (!PHANDLE_VALID(u3))
2908 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2909 if (!PHANDLE_VALID(i2c))
2911 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2912 if (!PHANDLE_VALID(mpic))
2915 /* check if proper rev of u3 */
2916 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
== PROM_ERROR)
return;
2919 if (u3_rev < 0x35 || u3_rev > 0x39)
2921 /* does it need fixup ? */
2922 if (prom_getproplen(i2c, "interrupts") > 0)
2925 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2927 /* interrupt on this revision of u3 is number 0 and level */
2930 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2931 &interrupts, sizeof(interrupts));
2933 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2934 &parent, sizeof(parent));
2937 #define fixup_device_tree_pmac()
2940 #ifdef CONFIG_PPC_EFIKA
2942 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2943 * to talk to the PHY. If the phy-handle property is missing, then this
2944 * function is called to add the appropriate nodes and link it to the
* ethernet node.
*/
2947 static void __init fixup_device_tree_efika_add_phy(void)
2953 /* Check if /builtin/ethernet exists - bail if it doesn't */
2954 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2955 if (!PHANDLE_VALID(node))
2958 /* Check if the phy-handle property exists - bail if it does */
2959 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2964 * At this point the ethernet device doesn't have a phy described.
2965 * Now we need to add the missing phy node and linkage
2968 /* Check for an MDIO bus node - if missing then create one */
2969 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2970 if (!PHANDLE_VALID(node)) {
2971 prom_printf("Adding Ethernet MDIO node\n");
2972 call_prom("interpret", 1, 1,
2973 " s\" /builtin\" find-device"
2975 " 1 encode-int s\" #address-cells\" property"
2976 " 0 encode-int s\" #size-cells\" property"
2977 " s\" mdio\" device-name"
2978 " s\" fsl,mpc5200b-mdio\" encode-string"
2979 " s\" compatible\" property"
2980 " 0xf0003000 0x400 reg"
2982 " 0x5 encode-int encode+"
2983 " 0x3 encode-int encode+"
2984 " s\" interrupts\" property"
2988 /* Check for a PHY device node - if missing then create one and
2989 * give its phandle to the ethernet node */
2990 node = call_prom("finddevice", 1, 1,
2991 ADDR("/builtin/mdio/ethernet-phy"));
2992 if (!PHANDLE_VALID(node)) {
2993 prom_printf("Adding Ethernet PHY node\n");
2994 call_prom("interpret", 1, 1,
2995 " s\" /builtin/mdio\" find-device"
2997 " s\" ethernet-phy\" device-name"
2998 " 0x10 encode-int s\" reg\" property"
3002 " s\" /builtin/ethernet\" find-device"
3004 " s\" phy-handle\" property"
3009 static void __init fixup_device_tree_efika(void)
3011 int sound_irq[3] = { 2, 2, 0 };
3012 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3013 3,4,0, 3,5,0, 3,6,0, 3,7,0,
3014 3,8,0, 3,9,0, 3,10,0, 3,11,0,
3015 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3020 /* Check if we're really running on an EFIKA */
3021 node = call_prom("finddevice", 1, 1, ADDR("/"));
3022 if (!PHANDLE_VALID(node))
3025 rv = prom_getprop(node, "model", prop, sizeof(prop));
3026 if (rv == PROM_ERROR)
3028 if (prom_strcmp(prop, "EFIKA5K2"))
3031 prom_printf("Applying EFIKA device tree fixups\n");
3033 /* Claiming to be 'chrp' is death */
3034 node = call_prom("finddevice", 1, 1, ADDR("/"));
3035 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3036 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3037 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3039 /* CODEGEN,description is exposed in /proc/cpuinfo, so
fix that too */
3041 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3042 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3043 prom_setprop(node, "/", "CODEGEN,description",
3044 "Efika 5200B PowerPC System",
3045 sizeof("Efika 5200B PowerPC System"));
3047 /* Fixup bestcomm interrupts property */
3048 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3049 if (PHANDLE_VALID(node)) {
3050 len = prom_getproplen(node, "interrupts");
3052 prom_printf("Fixing bestcomm interrupts property\n");
3053 prom_setprop(node, "/builtin/bestcom", "interrupts",
3054 bcomm_irq, sizeof(bcomm_irq));
3058 /* Fixup sound interrupts property */
3059 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3060 if (PHANDLE_VALID(node)) {
3061 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3062 if (rv == PROM_ERROR) {
3063 prom_printf("Adding sound interrupts property\n");
3064 prom_setprop(node, "/builtin/sound", "interrupts",
3065 sound_irq, sizeof(sound_irq));
3069 /* Make sure ethernet phy-handle property exists */
3070 fixup_device_tree_efika_add_phy();
3073 #define fixup_device_tree_efika()
3076 #ifdef CONFIG_PPC_PASEMI_NEMO
3078 * The CFE supplied on Nemo is broken in several ways; the biggest
3079 * problem is that it reassigns ISA interrupts to unused mpic ints.
3080 * Add an interrupt-controller property for the io-bridge to use
3081 * and correct the ints so we can attach them to an irq_domain
3083 static void __init fixup_device_tree_pasemi(void)
3085 u32 interrupts[2], parent, rval, val = 0;
3086 char *name, *pci_name;
3089 /* Find the root pci node */
3090 name = "/pxp@0,e0000000";
3091 iob = call_prom("finddevice", 1, 1, ADDR(name));
3092 if (!PHANDLE_VALID(iob))
3095 /* check if interrupt-controller node set yet */
3096 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
return;
3099 prom_printf("adding interrupt-controller property for SB600...\n");
3101 prom_setprop(iob, name, "interrupt-controller", &val, 0);
3103 pci_name = "/pxp@0,e0000000/pci@11";
3104 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3107 for ( ; prom_next_node(&node); ) {
3108 /* scan each node for one with an interrupt */
3109 if (!PHANDLE_VALID(node))
3112 rval = prom_getproplen(node, "interrupts");
3113 if (rval == 0 || rval == PROM_ERROR)
continue;

3116 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3117 if ((interrupts[0] < 212) || (interrupts[0] > 222))
continue;
3120 /* found a node, update both interrupts and interrupt-parent */
3121 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3122 interrupts[0] -= 203;
3123 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3124 interrupts[0] -= 213;
3125 if (interrupts[0] == 221)
3127 if (interrupts[0] == 222)
3130 prom_setprop(node, pci_name, "interrupts", interrupts,
3131 sizeof(interrupts));
3132 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3137 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3138 * so that generic isa-bridge code can add the SB600 and its on-board
* devices.
*/
3141 name = "/pxp@0,e0000000/io-bridge@0";
3142 iob = call_prom("finddevice", 1, 1, ADDR(name));
3143 if (!PHANDLE_VALID(iob))
3146 /* device_type is already set, just change it. */
3148 prom_printf("Changing device_type of SB600 node...\n");
3150 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3152 #else /* !CONFIG_PPC_PASEMI_NEMO */
3153 static inline void fixup_device_tree_pasemi(void) { }
3156 static void __init fixup_device_tree(void)
3158 fixup_device_tree_maple();
3159 fixup_device_tree_maple_memory_controller();
3160 fixup_device_tree_chrp();
3161 fixup_device_tree_pmac();
3162 fixup_device_tree_efika();
3163 fixup_device_tree_pasemi();
3166 static void __init prom_find_boot_cpu(void)
3173 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3175 prom_cpu = be32_to_cpu(rval);
3177 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3179 if (!PHANDLE_VALID(cpu_pkg))
3182 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3183 prom.cpu = be32_to_cpu(rval);
3185 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3188 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3190 #ifdef CONFIG_BLK_DEV_INITRD
3191 if (r3 && r4 && r4 != 0xdeadbeef) {
3194 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3195 prom_initrd_end = prom_initrd_start + r4;
3197 val = cpu_to_be64(prom_initrd_start);
3198 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
&val, sizeof(val));
3200 val = cpu_to_be64(prom_initrd_end);
3201 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
&val, sizeof(val));
3204 reserve_mem(prom_initrd_start,
3205 prom_initrd_end - prom_initrd_start);
3207 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3208 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3210 #endif /* CONFIG_BLK_DEV_INITRD */
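/*
 * When an initrd was handed over in r3/r4, the flattened tree ends up with
 * something like the following under /chosen (addresses are made up here):
 *
 *	linux,initrd-start = <0x0 0x02000000>;
 *	linux,initrd-end   = <0x0 0x02400000>;
 *
 * and the same range is entered into the reserve map so early boot does not
 * hand the initrd image to the allocator before it has been used.
 */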
3214 #ifdef CONFIG_RELOCATABLE
3215 static void reloc_toc(void)
{
}

3219 static void unreloc_toc(void)
{
}
#else
3223 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3226 unsigned long *toc_entry;
3228 /* Get the start of the TOC by using r2 directly. */
3229 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3231 for (i = 0; i < nr_entries; i++) {
3232 *toc_entry = *toc_entry + offset;
toc_entry++;
3237 static void reloc_toc(void)
3239 unsigned long offset = reloc_offset();
3240 unsigned long nr_entries =
3241 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3243 __reloc_toc(offset, nr_entries);
3248 static void unreloc_toc(void)
3250 unsigned long offset = reloc_offset();
3251 unsigned long nr_entries =
3252 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3256 __reloc_toc(-offset, nr_entries);
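/*
 * Rough worked example: assume the image was linked to run at address L but
 * was actually loaded D bytes away, so reloc_offset() returns D.  reloc_toc()
 * then walks every long-sized entry between __prom_init_toc_start and
 * __prom_init_toc_end adding D, so TOC-based accesses made from prom_init
 * hit the running copy; unreloc_toc() applies -D to restore the link-time
 * values before control moves on.
 */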
3261 #ifdef CONFIG_PPC_SVM
3263 * Perform the Enter Secure Mode ultracall.
3265 static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
3267 register unsigned long r3 asm("r3") = UV_ESM;
3268 register unsigned long r4 asm("r4") = kbase;
3269 register unsigned long r5 asm("r5") = fdt;
3271 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3277 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3279 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3283 if (!prom_svm_enable)
return;
3286 /* Switch to secure mode. */
3287 prom_printf("Switching to secure mode.\n");
3290 * The ultravisor will do an integrity check of the kernel image but we
3291 * relocated it so the check will fail. Restore the original image by
3292 * relocating it back to the kernel virtual base address.
3294 if (IS_ENABLED(CONFIG_RELOCATABLE))
3295 relocate(KERNELBASE);
3297 ret = enter_secure_mode(kbase, fdt);
3299 /* Relocate the kernel again. */
3300 if (IS_ENABLED(CONFIG_RELOCATABLE))
relocate(kbase);
3303 if (ret != U_SUCCESS) {
3304 prom_printf("Returned %d from switching to secure mode.\n", ret);
3305 prom_rtas_os_term("Switch to secure mode failed.\n");
3309 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3312 #endif /* CONFIG_PPC_SVM */
3315 * We enter here early on, when the Open Firmware prom is still
3316 * handling exceptions and managing the MMU hash table for us.
3319 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
unsigned long pp,
3321 unsigned long r6, unsigned long r7,
3322 unsigned long kbase)
3327 unsigned long offset = reloc_offset();
3334 * First zero the BSS
3336 memset(&__bss_start, 0, __bss_stop - __bss_start);
3339 * Init interface to Open Firmware, get some node references,
3342 prom_init_client_services(pp);
3345 * See if this OF is old enough that we need to do explicit maps
3346 * and other workarounds
3351 * Init prom stdout device
3355 prom_printf("Preparing to boot %s", linux_banner);
3358 * Get default machine type. At this point, we do not differentiate
3359 * between pSeries SMP and pSeries LPAR
3361 of_platform = prom_find_machine_type();
3362 prom_printf("Detected machine type: %x\n", of_platform);
3364 #ifndef CONFIG_NONSTATIC_KERNEL
3365 /* Bail if this is a kdump kernel. */
3366 if (PHYSICAL_START > 0)
3367 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3371 * Check for an initrd
3373 prom_check_initrd(r3, r4);
3376 * Do early parsing of command line
3378 early_cmdline_parse();
3380 #ifdef CONFIG_PPC_PSERIES
3382 * On pSeries, inform the firmware about our capabilities
3384 if (of_platform == PLATFORM_PSERIES ||
3385 of_platform == PLATFORM_PSERIES_LPAR)
3386 prom_send_capabilities();
3390 * Copy the CPU hold code
3392 if (of_platform != PLATFORM_POWERMAC)
3393 copy_and_flush(0, kbase, 0x100, 0);
3396 * Initialize memory management within prom_init
3401 * Determine which cpu is actually running right _now_
3403 prom_find_boot_cpu();
3406 * Initialize display devices
3408 prom_check_displays();
3410 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3412 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3413 * that uses the allocator, we need to make sure we get the top of memory
3414 * available for us here...
3416 if (of_platform == PLATFORM_PSERIES)
3417 prom_initialize_tce_table();
3421 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3422 * have a usable RTAS implementation.
3424 if (of_platform != PLATFORM_POWERMAC)
3425 prom_instantiate_rtas();
3428 /* instantiate the TPM stored measurement log (sml) */
3429 prom_instantiate_sml();
3433 * On non-powermacs, put all CPUs in spin-loops.
3435 * PowerMacs use a different mechanism to spin CPUs
3437 * (This must be done after instantiating RTAS.)
*/
3439 if (of_platform != PLATFORM_POWERMAC)
prom_hold_cpus();
3443 * Fill in some info for use by the kernel later on
3445 if (prom_memory_limit) {
3446 __be64 val = cpu_to_be64(prom_memory_limit);
3447 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
&val, sizeof(val));
}

if (prom_iommu_off)
3452 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
NULL, 0);

3455 if (prom_iommu_force_on)
3456 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
NULL, 0);
3459 if (prom_tce_alloc_start) {
3460 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3461 &prom_tce_alloc_start,
3462 sizeof(prom_tce_alloc_start));
3463 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3464 &prom_tce_alloc_end,
3465 sizeof(prom_tce_alloc_end));
3470 * Fixup any known bugs in the device-tree
3472 fixup_device_tree();
3475 * Now finally create the flattened device-tree
3477 prom_printf("copying OF device tree...\n");
3478 flatten_device_tree();
3481 * Close stdin in case it is USB and still active on IBM machines...
3482 * Unfortunately quiesce crashes on some powermacs if we have
3483 * closed stdin already (in particular the powerbook 101).
*/
3485 if (of_platform != PLATFORM_POWERMAC)
prom_close_stdin();
3489 * Call OF "quiesce" method to shut down pending DMA's from
3492 prom_printf("Quiescing Open Firmware ...\n");
3493 call_prom("quiesce", 0, 0);
3496 * And finally, call the kernel passing it the flattened device
3497 * tree and NULL as r5, thus triggering the new entry point which
3498 * is common to us and kexec
3500 hdr = dt_header_start;
3502 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3503 prom_debug("->dt_header_start=0x%lx\n", hdr);
3506 reloc_got2(-offset);
3511 /* Move to secure memory if we're supposed to be secure guests. */
3512 setup_secure_guest(kbase, hdr);
3514 __start(hdr, kbase, 0, 0, 0, 0, 0);
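/*
 * Hand-off convention recap: r3 is the physical address of the flattened
 * device tree built above, r4 is the address the kernel was loaded/copied
 * to, and r5 is 0 instead of an Open Firmware entry point, which is what
 * steers early boot down the flat-device-tree path shared with kexec.
 */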