/*
 * linux/init/main.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * GK 2/5/95 - Changed to support mounting root fs via NFS
 * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
 * Simplified starting of init: Michael A. Griffith <[email protected]>
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/stackprotector.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/tty.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/profile.h>
#include <linux/rcupdate.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
#include <linux/interrupt.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/buffer_head.h>
#include <linux/page_cgroup.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
#include <linux/kmemleak.h>
#include <linux/pid_namespace.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
#include <linux/kmemtrace.h>
#include <trace/boot.h>

#include <asm/io.h>
#include <asm/bugs.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#endif

static int kernel_init(void *);

extern void init_IRQ(void);
extern void fork_init(unsigned long);
extern void mca_init(void);
extern void sbus_init(void);
extern void prio_tree_init(void);
extern void radix_tree_init(void);
extern void free_initmem(void);
#ifndef CONFIG_DEBUG_RODATA
static inline void mark_rodata_ro(void) { }
#endif

#ifdef CONFIG_TC
extern void tc_init(void);
#endif

enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);

/*
 * Boot command-line arguments
 */
#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT

extern void time_init(void);
/* Default late time init is NULL. archs can override this later. */
void (*__initdata late_time_init)(void);
extern void softirq_init(void);

/* Untouched command line saved by arch-specific code. */
char __initdata boot_command_line[COMMAND_LINE_SIZE];
/* Untouched saved command line (eg. for /proc) */
char *saved_command_line;
/* Command line for parameter parsing */
static char *static_command_line;

static char *execute_command;
static char *ramdisk_execute_command;

#ifdef CONFIG_SMP
/* Setup configured maximum number of CPUs to activate */
unsigned int __initdata setup_max_cpus = NR_CPUS;

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
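/*
 * For example, booting with "nosmp" or "maxcpus=0" keeps only the boot CPU
 * online, while "maxcpus=2" lets smp_init() bring up at most two CPUs,
 * since its bring-up loop stops once num_online_cpus() reaches
 * setup_max_cpus.
 */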
#else
const unsigned int setup_max_cpus = NR_CPUS;
#endif

/*
 * If set, this is an indication to the drivers to reset the underlying
 * device before going ahead with the initialization; otherwise the driver
 * might rely on the BIOS and skip the reset operation.
 *
 * This is useful if the kernel is booting in an unreliable environment,
 * e.g. a kdump situation where the previous kernel has crashed, the BIOS
 * has been skipped and devices are in an unknown state.
 */
unsigned int reset_devices;
EXPORT_SYMBOL(reset_devices);

static int __init set_reset_devices(char *str)
{
	reset_devices = 1;
	return 1;
}

__setup("reset_devices", set_reset_devices);

static char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
static const char *panic_later, *panic_param;

extern struct obs_kernel_param __setup_start[], __setup_end[];

static int __init obsolete_checksetup(char *line)
{
	struct obs_kernel_param *p;
	int had_early_param = 0;

	p = __setup_start;
	do {
		int n = strlen(p->str);
		if (!strncmp(line, p->str, n)) {
			if (p->early) {
				/* Already done in parse_early_param?
				 * (Needs exact match on param part).
				 * Keep iterating, as we can have early
				 * params and __setups of same names 8( */
				if (line[n] == '\0' || line[n] == '=')
					had_early_param = 1;
			} else if (!p->setup_func) {
				printk(KERN_WARNING "Parameter %s is obsolete,"
				       " ignored\n", p->str);
				return 1;
			} else if (p->setup_func(line + n))
				return 1;
		}
		p++;
	} while (p < __setup_end);

	return had_early_param;
}

/*
 * This should be approx 2 Bo*oMips to start (note initial shift), and will
 * still work even if initially too large, it will just take slightly longer
 */
unsigned long loops_per_jiffy = (1<<12);

EXPORT_SYMBOL(loops_per_jiffy);

static int __init debug_kernel(char *str)
{
	console_loglevel = 10;
	return 0;
}

static int __init quiet_kernel(char *str)
{
	console_loglevel = 4;
	return 0;
}

early_param("debug", debug_kernel);
early_param("quiet", quiet_kernel);

static int __init loglevel(char *str)
{
	get_option(&str, &console_loglevel);
	return 0;
}

early_param("loglevel", loglevel);
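/*
 * In short: "debug" raises console_loglevel to 10 (print everything),
 * "quiet" lowers it to 4 (errors and worse only), and "loglevel=N" sets an
 * explicit value parsed by get_option().
 */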

/*
 * Unknown boot options get handed to init, unless they look like
 * failed parameters
 */
static int __init unknown_bootoption(char *param, char *val)
{
	/* Change NUL term back to "=", to make "param" the whole string. */
	if (val) {
		/* param=val or param="val"? */
		if (val == param+strlen(param)+1)
			val[-1] = '=';
		else if (val == param+strlen(param)+2) {
			val[-2] = '=';
			memmove(val-1, val, strlen(val)+1);
			val--;
		} else
			BUG();
	}

	/* Handle obsolete-style parameters */
	if (obsolete_checksetup(param))
		return 0;

	/*
	 * Preemptive maintenance for "why didn't my misspelled command
	 * line work?"
	 */
	if (strchr(param, '.') && (!val || strchr(param, '.') < val)) {
		printk(KERN_ERR "Unknown boot option `%s': ignoring\n", param);
		return 0;
	}

	if (panic_later)
		return 0;

	if (val) {
		/* Environment option */
		unsigned int i;
		for (i = 0; envp_init[i]; i++) {
			if (i == MAX_INIT_ENVS) {
				panic_later = "Too many boot env vars at `%s'";
				panic_param = param;
			}
			if (!strncmp(param, envp_init[i], val - param))
				break;
		}
		envp_init[i] = param;
	} else {
		/* Command line option */
		unsigned int i;
		for (i = 0; argv_init[i]; i++) {
			if (i == MAX_INIT_ARGS) {
				panic_later = "Too many boot init vars at `%s'";
				panic_param = param;
			}
		}
		argv_init[i] = param;
	}
	return 0;
}
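/*
 * Net effect: a boot option that no __setup()/early_param() handler claims
 * is forwarded to init instead. "name=value" strings become environment
 * entries in envp_init[], bare words become extra arguments in argv_init[],
 * each capped by MAX_INIT_ENVS/MAX_INIT_ARGS.
 */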

#ifdef CONFIG_DEBUG_PAGEALLOC
int __read_mostly debug_pagealloc_enabled = 0;
#endif

static int __init init_setup(char *str)
{
	unsigned int i;

	execute_command = str;
	/*
	 * In case LILO is going to boot us with default command line,
	 * it prepends "auto" before the whole cmdline which makes
	 * the shell think it should execute a script with such name.
	 * So we ignore all arguments entered _before_ init=... [MJ]
	 */
	for (i = 1; i < MAX_INIT_ARGS; i++)
		argv_init[i] = NULL;
	return 1;
}
__setup("init=", init_setup);

static int __init rdinit_setup(char *str)
{
	unsigned int i;

	ramdisk_execute_command = str;
	/* See "auto" comment in init_setup */
	for (i = 1; i < MAX_INIT_ARGS; i++)
		argv_init[i] = NULL;
	return 1;
}
__setup("rdinit=", rdinit_setup);

#ifndef CONFIG_SMP

#ifdef CONFIG_X86_LOCAL_APIC
static void __init smp_init(void)
{
	APIC_init_uniprocessor();
}
#else
#define smp_init()	do { } while (0)
#endif

static inline void setup_per_cpu_areas(void) { }
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }

#else

#if NR_CPUS > BITS_PER_LONG
cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_mask_all);
#endif

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
static void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(__per_cpu_offset);

static void __init setup_per_cpu_areas(void)
{
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
	ptr = alloc_bootmem_pages(size * nr_possible_cpus);

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
	unsigned int cpu;

	/*
	 * Set up the current CPU as possible to migrate to.
	 * The other ones will be done by cpu_up/cpu_down()
	 */
	set_cpu_active(smp_processor_id(), true);

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

#endif

/*
 * We need to store the untouched command line for future reference.
 * We also need to store the touched command line since the parameter
 * parsing is performed in place, and we should allow a component to
 * store reference of name/value for future reference.
 */
static void __init setup_command_line(char *command_line)
{
	saved_command_line = alloc_bootmem(strlen(boot_command_line) + 1);
	static_command_line = alloc_bootmem(strlen(command_line) + 1);
	strcpy(saved_command_line, boot_command_line);
	strcpy(static_command_line, command_line);
}

/*
 * We need to finalize in a non-__init function or else race conditions
 * between the root thread and the init thread may cause start_kernel to
 * be reaped by free_initmem before the root thread has proceeded to
 * cpu_idle.
 *
 * gcc-3.4 accidentally inlines this function, so use noinline.
 */

static noinline void __init_refok rest_init(void)
	__releases(kernel_lock)
{
	int pid;

	rcu_scheduler_starting();
	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
	numa_default_policy();
	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
	unlock_kernel();

	/*
	 * The boot idle thread must execute schedule()
	 * at least once to get things moving:
	 */
	init_idle_bootup_task(current);
	preempt_enable_no_resched();
	schedule();
	preempt_disable();

	/* Call into cpu_idle with preempt disabled */
	cpu_idle();
}
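/*
 * rest_init() creates the two founding kernel threads: kernel_init (which
 * eventually execs the userspace init, normally as PID 1) and kthreadd
 * (normally PID 2, the parent of all other kernel threads). The original
 * boot thread then settles into cpu_idle() and becomes the idle task.
 */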

/* Check for early params. */
static int __init do_early_param(char *param, char *val)
{
	struct obs_kernel_param *p;

	for (p = __setup_start; p < __setup_end; p++) {
		if ((p->early && strcmp(param, p->str) == 0) ||
		    (strcmp(param, "console") == 0 &&
		     strcmp(p->str, "earlycon") == 0)
		) {
			if (p->setup_func(val) != 0)
				printk(KERN_WARNING
				       "Malformed early option '%s'\n", param);
		}
	}
	/* We accept everything at this stage. */
	return 0;
}

void __init parse_early_options(char *cmdline)
{
	parse_args("early options", cmdline, NULL, 0, do_early_param);
}

/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void)
{
	static __initdata int done = 0;
	static __initdata char tmp_cmdline[COMMAND_LINE_SIZE];

	if (done)
		return;

	/* All fall through to do_early_param. */
	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_early_options(tmp_cmdline);
	done = 1;
}

/*
 * Activate the first processor.
 */

static void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();
	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

void __init __weak smp_setup_processor_id(void)
{
}

void __init __weak thread_info_cache_init(void)
{
}

/*
 * Set up kernel memory allocators
 */
static void __init mm_init(void)
{
	/*
	 * page_cgroup requires contiguous pages as memmap
	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
	 */
	page_cgroup_init_flatmem();
	mem_init();
	kmem_cache_init();
	pgtable_cache_init();
	vmalloc_init();
}
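/*
 * Ordering matters here: mem_init() hands boot memory over to the page
 * allocator, kmem_cache_init() then brings up the slab allocator on top of
 * it, and vmalloc_init() prepares the vmalloc area, so the core allocators
 * are usable for the rest of start_kernel().
 */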

asmlinkage void __init start_kernel(void)
{
	char *command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	debug_objects_early_init();

	/*
	 * Set up the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them
	 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE "%s", linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists();
	page_alloc_init();

	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();
	/*
	 * Set up the scheduler prior to starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	rcu_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	prio_tree_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/* Interrupts are enabled now so all GFP allocations are safe. */
	set_gfp_allowed_mask(__GFP_BITS_MASK);

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	enable_debug_pagealloc();
	kmemtrace_init();
	kmemleak_init();
	debug_objects_mem_init();
	idr_init_cache();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(num_physpages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	vfs_caches_init(num_physpages);
	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}

/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
	ctor_fn_t *call = (ctor_fn_t *) __ctors_start;

	for (; call < (ctor_fn_t *) __ctors_end; call++)
		(*call)();
#endif
}

int initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);

static char msgbuf[64];
static struct boot_trace_call call;
static struct boot_trace_ret ret;

int do_one_initcall(initcall_t fn)
{
	int count = preempt_count();
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		call.caller = task_pid_nr(current);
		printk("calling %pF @ %i\n", fn, call.caller);
		calltime = ktime_get();
		trace_boot_call(&call, fn);
		enable_boot_trace();
	}

	ret.result = fn();

	if (initcall_debug) {
		disable_boot_trace();
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
		trace_boot_ret(&ret, fn);
		printk("initcall %pF returned %d after %Ld usecs\n", fn,
			ret.result, ret.duration);
	}

	msgbuf[0] = 0;

	if (ret.result && ret.result != -ENODEV && initcall_debug)
		sprintf(msgbuf, "error code %d ", ret.result);

	if (preempt_count() != count) {
		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
		preempt_count() = count;
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	if (msgbuf[0]) {
		printk("initcall %pF returned with %s\n", fn, msgbuf);
	}

	return ret.result;
}
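/*
 * With "initcall_debug" on the command line, each initcall is bracketed by
 * "calling ..." / "initcall ... returned ..." messages including its
 * duration, and boot tracing is enabled around the call. Independently of
 * that, the checks above catch initcalls that unbalance preempt_count() or
 * return with interrupts disabled.
 */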

extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];

static void __init do_initcalls(void)
{
	initcall_t *call;

	for (call = __early_initcall_end; call < __initcall_end; call++)
		do_one_initcall(*call);

	/* Make sure there is no pending stuff from the initcall sequence */
	flush_scheduled_work();
}

/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
	rcu_init_sched(); /* needed by module_init stage. */
	init_workqueues();
	cpuset_init_smp();
	usermodehelper_init();
	driver_init();
	init_irq_proc();
	do_ctors();
	do_initcalls();
}
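/*
 * do_basic_setup() runs from kernel_init, i.e. in process context after the
 * scheduler and the other CPUs are up, so initcalls may sleep, allocate
 * with GFP_KERNEL, and rely on the workqueues and driver core initialized
 * just above.
 */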

static void __init do_pre_smp_initcalls(void)
{
	initcall_t *call;

	for (call = __initcall_start; call < __early_initcall_end; call++)
		do_one_initcall(*call);
}

static void run_init_process(char *init_filename)
{
	argv_init[0] = init_filename;
	kernel_execve(init_filename, argv_init, envp_init);
}
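/*
 * kernel_execve() only returns on failure, so each run_init_process() call
 * in init_post() below is a fall-through attempt at the next candidate
 * init binary.
 */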

/* This is a non __init function. Force it to be noinline otherwise gcc
 * makes it inline to init() and it becomes part of init.text section
 */
static noinline int init_post(void)
	__releases(kernel_lock)
{
	/* need to finish all async __init code before freeing the memory */
	async_synchronize_full();
	free_initmem();
	unlock_kernel();
	mark_rodata_ro();
	system_state = SYSTEM_RUNNING;
	numa_default_policy();

	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		printk(KERN_WARNING "Warning: unable to open an initial console.\n");

	(void) sys_dup(0);
	(void) sys_dup(0);

	current->signal->flags |= SIGNAL_UNKILLABLE;

	if (ramdisk_execute_command) {
		run_init_process(ramdisk_execute_command);
		printk(KERN_WARNING "Failed to execute %s\n",
				ramdisk_execute_command);
	}

	/*
	 * We try each of these until one succeeds.
	 *
	 * The Bourne shell can be used instead of init if we are
	 * trying to recover a really broken machine.
	 */
	if (execute_command) {
		run_init_process(execute_command);
		printk(KERN_WARNING "Failed to execute %s. Attempting "
					"defaults...\n", execute_command);
	}
	run_init_process("/sbin/init");
	run_init_process("/etc/init");
	run_init_process("/bin/init");
	run_init_process("/bin/sh");

	panic("No init found. Try passing init= option to kernel.");
}

static int __init kernel_init(void *unused)
{
	lock_kernel();

	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_possible_map);
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed_ptr(current, cpu_all_mask);
	/*
	 * Tell the world that we're going to be the grim
	 * reaper of innocent orphaned children.
	 *
	 * We don't want people to have to make incorrect
	 * assumptions about where in the task array this
	 * can be found.
	 */
	init_pid_ns.child_reaper = current;

	cad_pid = task_pid(current);

	smp_prepare_cpus(setup_max_cpus);

	do_pre_smp_initcalls();
	start_boot_trace();

	smp_init();
	sched_init_smp();

	do_basic_setup();

	/*
	 * check if there is an early userspace init. If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */

	init_post();
	return 0;
}