// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"

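/*
 * Copy the user-supplied segment descriptors into the kimage.  Callers
 * have already capped nr_segments at KEXEC_SEGMENT_MAX, so the size
 * computation below cannot overflow image->segment[].
 */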
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}

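/*
 * Allocate a kimage and populate it from the user-supplied entry point
 * and segment list.  For crash kernels the entry point must lie inside
 * the reserved crashk_res window, and control pages are taken from that
 * reservation rather than from the normal page allocator.
 */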
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

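/*
 * Load (or, when nr_segments == 0, unload) a kexec image.  Called with
 * kexec_mutex held by both the native and the compat syscall below.
 */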
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment __user *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* Uninstall image */
		kimage_free(xchg(dest_image, NULL));
		return 0;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		return ret;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch crash memory before
	 * machine_kexec_prepare(), so the vmcoreinfo data must be
	 * copied after that call.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	ret = machine_kexec_post_load(image);
	if (ret)
		goto out;

	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);
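	/* On success, image now holds the previously installed kimage
	 * (possibly NULL); it is released by kimage_free() below.
	 */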

out:
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	kimage_free(image);
	return ret;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing ongoing DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */
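
/*
 * Illustrative userspace invocation (not part of this file): a single,
 * hypothetical segment loaded via the raw syscall.  Real loaders such
 * as kexec-tools build several segments (kernel, purgatory, boot
 * parameters) and derive the entry point from the loaded image.
 *
 *	struct kexec_segment seg = {
 *		.buf	= code,		// source in the caller's address space
 *		.bufsz	= code_size,
 *		.mem	= dest,		// page-aligned physical destination
 *		.memsz	= code_size_page_aligned,
 *	};
 *	if (syscall(SYS_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT))
 *		perror("kexec_load");
 */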

static inline int kexec_load_check(unsigned long nr_segments,
				   unsigned long flags)
{
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Permit LSMs and IMA to fail the kexec */
	result = security_kernel_load_data(LOADING_KEXEC_IMAGE, false);
	if (result < 0)
		return result;

	/*
	 * kexec can be used to circumvent module loading restrictions, so
	 * prevent loading in that case
	 */
	result = security_locked_down(LOCKDOWN_KEXEC);
	if (result)
		return result;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	return 0;
}

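/*
 * The architecture check lives here rather than in kexec_load_check():
 * the native syscall accepts KEXEC_ARCH_DEFAULT, while the compat entry
 * point below must reject it.
 */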
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	int result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, segments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}

#ifdef CONFIG_COMPAT
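/*
 * 32-bit compat entry point: each 32-bit segment descriptor is
 * converted to the native layout in a compat_alloc_user_space() area,
 * so that do_kexec_load() can reuse the same copy_from_user() path.
 */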
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, ksegments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}
#endif