// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel module for testing copy_to/from_user infrastructure.
 *
 * Copyright 2013 Google Inc. All Rights Reserved
 *
 * Authors:
 *      Kees Cook       <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

/*
 * Several 32-bit architectures support 64-bit {get,put}_user() calls.
 * As there doesn't appear to be anything that can safely determine
 * their capability at compile-time, we just have to opt-out certain archs.
 */
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
			    !defined(CONFIG_M68K) &&		\
			    !defined(CONFIG_MICROBLAZE) &&	\
			    !defined(CONFIG_NIOS2) &&		\
			    !defined(CONFIG_PPC32) &&		\
			    !defined(CONFIG_SUPERH))
# define TEST_U64
#endif
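/*
 * test() evaluates to the (nonzero) condition when a check fails, so callers
 * can accumulate results with "ret |= test(...)"; the warning it prints
 * includes __LINE__ to point at the failing check.
 */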
#define test(condition, msg, ...)					\
({									\
	int cond = (condition);						\
	if (cond)							\
		pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);	\
	cond;								\
})
static bool is_zeroed(void *from, size_t size)
{
	return memchr_inv(from, 0x0, size) == NULL;
}

static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
{
	int ret = 0;
	size_t start, end, i, zero_start, zero_end;

	if (test(size < 2 * PAGE_SIZE, "buffer too small"))
		return -EINVAL;

	/*
	 * We want to cross a page boundary to exercise the code more
	 * effectively. We also don't want to make the size we scan too large,
	 * otherwise the test can take a long time and cause soft lockups. So
	 * scan a 1024 byte region across the page boundary.
	 */
	size = 1024;
	start = PAGE_SIZE - (size / 2);

	kmem += start;
	umem += start;

	zero_start = size / 4;
	zero_end = size - zero_start;

	/*
	 * We conduct a series of check_nonzero_user() tests on a block of
	 * memory with the following byte-pattern (trying every possible
	 * [start,end] pair):
	 *
	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
	 *
	 * And we verify that check_nonzero_user() acts identically to
	 * memchr_inv().
	 */

	memset(kmem, 0x0, size);
	for (i = 1; i < zero_start; i += 2)
		kmem[i] = 0xff;
	for (i = zero_end; i < size; i += 2)
		kmem[i] = 0xff;

	ret |= test(copy_to_user(umem, kmem, size),
		    "legitimate copy_to_user failed");
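	/*
	 * Every (start, end) pair with 0 <= start <= end <= size is checked,
	 * i.e. roughly size * size / 2 combinations (about half a million
	 * check_zeroed_user() calls for size == 1024), which is why the
	 * scanned region is capped at 1024 bytes above.
	 */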
	for (start = 0; start <= size; start++) {
		for (end = start; end <= size; end++) {
			size_t len = end - start;
			int retval = check_zeroed_user(umem + start, len);
			int expected = is_zeroed(kmem + start, len);

			ret |= test(retval != expected,
				    "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
				    retval, expected, start, end);
		}
	}

	return ret;
}

static int test_copy_struct_from_user(char *kmem, char __user *umem,
				      size_t size)
{
	int ret = 0;
	char *umem_src = NULL, *expected = NULL;
	size_t ksize, usize;

	umem_src = kmalloc(size, GFP_KERNEL);
	ret = test(umem_src == NULL, "kmalloc failed");
	if (ret)
		goto out_free;

	expected = kmalloc(size, GFP_KERNEL);
	ret = test(expected == NULL, "kmalloc failed");
	if (ret)
		goto out_free;

	/* Fill umem with a fixed byte pattern. */
	memset(umem_src, 0x3e, size);
	ret |= test(copy_to_user(umem, umem_src, size),
		    "legitimate copy_to_user failed");
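	/*
	 * The cases below exercise the documented copy_struct_from_user()
	 * semantics: min(ksize, usize) bytes are copied; if usize < ksize the
	 * remaining kernel bytes are zero-filled; if usize > ksize the call
	 * only succeeds when the trailing userspace bytes are all zero, and
	 * otherwise fails with -E2BIG.
	 */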
	/* Check basic case -- (usize == ksize). */
	ksize = size;
	usize = size;

	memcpy(expected, umem_src, ksize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize == ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize == ksize) gives unexpected copy");

	/* Old userspace case -- (usize < ksize). */
	ksize = size;
	usize = size / 2;

	memcpy(expected, umem_src, usize);
	memset(expected + usize, 0x0, ksize - usize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize < ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize < ksize) gives unexpected copy");

	/* New userspace (-E2BIG) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
		    "copy_struct_from_user(usize > ksize) didn't give E2BIG");

	/* New userspace (success) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memcpy(expected, umem_src, ksize);
	ret |= test(clear_user(umem + ksize, usize - ksize),
		    "legitimate clear_user failed");

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize > ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize > ksize) gives unexpected copy");

out_free:
	kfree(expected);
	kfree(umem_src);
	return ret;
}

static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;
	unsigned long user_addr;
	u8 val_u8;
	u16 val_u16;
	u32 val_u32;
#ifdef TEST_U64
	u64 val_u64;
#endif

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
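	/*
	 * vm_mmap() reports failure as a negative errno cast to unsigned
	 * long, i.e. an address above TASK_SIZE, which is what the range
	 * check below catches.
	 */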
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	bad_usermem = (char *)user_addr;

	/*
	 * Legitimate usage: none of these copies should fail.
	 */
	memset(kmem, 0x3a, PAGE_SIZE * 2);
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	memset(kmem, 0x0, PAGE_SIZE);
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
		    "legitimate usercopy failed to copy data");
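	/*
	 * Round-trip each supported access size through put_user()/get_user()
	 * on valid user memory and verify the value comes back unchanged.
	 */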
#define test_legit(size, check)						\
	do {								\
		val_##size = check;					\
		ret |= test(put_user(val_##size, (size __user *)usermem), \
		    "legitimate put_user (" #size ") failed");		\
		val_##size = 0;						\
		ret |= test(get_user(val_##size, (size __user *)usermem), \
		    "legitimate get_user (" #size ") failed");		\
		ret |= test(val_##size != check,			\
		    "legitimate get_user (" #size ") failed to do copy"); \
		if (val_##size != check) {				\
			pr_info("0x%llx != 0x%llx\n",			\
				(unsigned long long)val_##size,		\
				(unsigned long long)check);		\
		}							\
	} while (0)

	test_legit(u8, 0x5a);
	test_legit(u16, 0x5a5b);
	test_legit(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_legit(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_legit

	/* Test usage of check_nonzero_user(). */
	ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
	/* Test usage of copy_struct_from_user(). */
	ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);

	/*
	 * Invalid usage: none of these copies should succeed.
	 */

	/* Prepare kernel memory with check values. */
	memset(kmem, 0x5a, PAGE_SIZE);
	memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);

	/* Reject kernel-to-kernel copies through copy_from_user(). */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");
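	/*
	 * A faulting copy_from_user() must zero the uncopied part of the
	 * destination so callers never consume stale kernel memory.
	 */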
	/* Destination half of buffer should have been zeroed. */
	ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
		    "zeroing failure for illegal all-kernel copy_from_user");

#if 0
	/*
	 * When running with SMAP/PAN/etc, this will Oops the kernel
	 * due to the zeroing of userspace memory on failure. This needs
	 * to be tested in LKDTM instead, since this test module does not
	 * expect to explode.
	 */
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
#endif
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");
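	/*
	 * Each access size must fault when aimed at a kernel address:
	 * get_user() has to fail and zero its destination (no kernel data
	 * may leak into the read value), and put_user() has to fail too.
	 */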
#define test_illegal(size, check)					\
	do {								\
		val_##size = (check);					\
		ret |= test(!get_user(val_##size, (size __user *)kmem), \
			    "illegal get_user (" #size ") passed");	\
		ret |= test(val_##size != (size)0,			\
			    "zeroing failure for illegal get_user (" #size ")"); \
		if (val_##size != (size)0) {				\
			pr_info("0x%llx != 0\n",			\
				(unsigned long long)val_##size);	\
		}							\
		ret |= test(!put_user(val_##size, (size __user *)kmem),	\
			    "illegal put_user (" #size ") passed");	\
	} while (0)

	test_illegal(u8, 0x5a);
	test_illegal(u16, 0x5a5b);
	test_illegal(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_illegal

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}

module_init(test_user_copy_init);

static void __exit test_user_copy_exit(void)
{
	pr_info("unloaded.\n");
}

module_exit(test_user_copy_exit);

MODULE_AUTHOR("Kees Cook <[email protected]>");
MODULE_LICENSE("GPL");