// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for SL[AOU]B/page initialization at alloc/free time.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

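/*
 * Garbage patterns: whole words are filled with GARBAGE_INT and any
 * unaligned tail with GARBAGE_BYTE (the low byte of GARBAGE_INT), so
 * every garbage byte is guaranteed to be non-zero.
 */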
#define GARBAGE_INT (0x09A7BA9E)
#define GARBAGE_BYTE (0x9E)

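/*
 * Report per-function results. This macro is deliberately non-hygienic:
 * it expects local variables named |failures| and |num_tests| in the
 * calling function.
 */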
#define REPORT_FAILURES_IN_FN() \
	do {	\
		if (failures) \
			pr_info("%s failed %d out of %d times\n", \
				__func__, failures, num_tests); \
		else \
			pr_info("all %d tests in %s passed\n", \
				num_tests, __func__); \
	} while (0)

/*
 * Count the non-zero bytes in the buffer. The result serves as a proxy
 * for the number of uninitialized (garbage) bytes: freshly initialized
 * memory must be all zeroes.
 */
static int __init count_nonzero_bytes(void *ptr, size_t size)
{
	int i, ret = 0;
	unsigned char *p = (unsigned char *)ptr;

	for (i = 0; i < size; i++)
		if (p[i])
			ret++;
	return ret;
}

/* Fill a buffer with garbage, skipping |skip| first bytes. */
static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
{
	unsigned int *p = (unsigned int *)((char *)ptr + skip);
	int i = 0;

	WARN_ON(skip > size);
	size -= skip;

	while (size >= sizeof(*p)) {
		p[i] = GARBAGE_INT;
		i++;
		size -= sizeof(*p);
	}
	if (size)
		memset(&p[i], GARBAGE_BYTE, size);
}

static void __init fill_with_garbage(void *ptr, size_t size)
{
	fill_with_garbage_skip(ptr, size, 0);
}

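/*
 * Common pattern for the tests below: allocate a buffer, fill it with
 * garbage, free it, then allocate again and check that the new buffer
 * contains no leftover non-zero bytes. With init_on_alloc/init_on_free
 * enabled, reallocated memory must come back zeroed.
 */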
static int __init do_alloc_pages_order(int order, int *total_failures)
{
	struct page *page;
	void *buf;
	size_t size = PAGE_SIZE << order;

	page = alloc_pages(GFP_KERNEL, order);
	if (!page)
		return 0;	/* Skip the test if the allocation fails. */
	buf = page_address(page);
	fill_with_garbage(buf, size);
	__free_pages(page, order);

	page = alloc_pages(GFP_KERNEL, order);
	if (!page)
		return 0;
	buf = page_address(page);
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	__free_pages(page, order);
	return 1;
}

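/*
 * With 4 KB pages, orders 0 through 9 exercise allocations from 4 KB up
 * to 2 MB.
 */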
/* Test the page allocator by calling alloc_pages with different orders. */
static int __init test_pages(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i;

	for (i = 0; i < 10; i++)
		num_tests += do_alloc_pages_order(i, &failures);

	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/* Test kmalloc() with given parameters. */
static int __init do_kmalloc_size(size_t size, int *total_failures)
{
	void *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return 0;	/* Skip the test if the allocation fails. */
	fill_with_garbage(buf, size);
	kfree(buf);

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return 0;
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	kfree(buf);
	return 1;
}

/* Test vmalloc() with given parameters. */
static int __init do_vmalloc_size(size_t size, int *total_failures)
{
	void *buf;

	buf = vmalloc(size);
	if (!buf)
		return 0;
	fill_with_garbage(buf, size);
	vfree(buf);

	buf = vmalloc(size);
	if (!buf)
		return 0;
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	vfree(buf);
	return 1;
}

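/*
 * The sizes below run from 1 byte to 512 KB (1 << 19). vmalloc() rounds
 * allocations up to whole pages, but only the first |size| bytes are
 * checked.
 */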
/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
static int __init test_kvmalloc(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, size;

	for (i = 0; i < 20; i++) {
		size = 1 << i;
		num_tests += do_kmalloc_size(size, &failures);
		num_tests += do_vmalloc_size(size, &failures);
	}

	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

#define CTOR_BYTES (sizeof(unsigned int))
#define CTOR_PATTERN (0x41414141)
/* Initialize the first 4 bytes of the object. */
static void test_ctor(void *obj)
{
	*(unsigned int *)obj = CTOR_PATTERN;
}

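/*
 * Slab constructors run when an object's backing page is allocated, not
 * on every kmem_cache_alloc() call, and objects must be freed in a
 * constructed state. This is why CTOR_PATTERN has to survive both
 * allocation and free.
 */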
/*
 * Check the invariants for the buffer allocated from a slab cache.
 * If the cache has a test constructor, the first 4 bytes of the object must
 * always remain equal to CTOR_PATTERN.
 * If the cache isn't an RCU-typesafe one, or if the allocation is done with
 * __GFP_ZERO, then the object contents must be zeroed after allocation.
 * If the cache is an RCU-typesafe one, the object contents must never be
 * zeroed after the first use. This is checked by memcmp() in
 * do_kmem_cache_size().
 */
static bool __init check_buf(void *buf, int size, bool want_ctor,
			     bool want_rcu, bool want_zero)
{
	int bytes;
	bool fail = false;

	bytes = count_nonzero_bytes(buf, size);
	WARN_ON(want_ctor && want_zero);
	if (want_zero)
		return bytes != 0;
	if (want_ctor) {
		if (*(unsigned int *)buf != CTOR_PATTERN)
			fail = true;
	} else {
		if (bytes)
			fail = !want_rcu;
	}
	return fail;
}

#define BULK_SIZE 100
static void *bulk_array[BULK_SIZE];

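/*
 * Bulk (batched) allocations take a separate path through the slab
 * allocator, so their initialization behavior is checked independently
 * of the regular kmem_cache_alloc() path.
 */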
/*
 * Test kmem_cache with given parameters:
 *  want_ctor - use a constructor;
 *  want_rcu - use SLAB_TYPESAFE_BY_RCU;
 *  want_zero - use __GFP_ZERO.
 */
static int __init do_kmem_cache_size(size_t size, bool want_ctor,
				     bool want_rcu, bool want_zero,
				     int *total_failures)
{
	struct kmem_cache *c;
	int iter;
	bool fail = false;
	gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
	void *buf, *buf_copy;

	c = kmem_cache_create("test_cache", size, 1,
			      want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
			      want_ctor ? test_ctor : NULL);
	if (!c)
		return 0;	/* Skip the test if the cache can't be created. */
	for (iter = 0; iter < 10; iter++) {
		/* Do a test of bulk allocations */
		if (!want_rcu && !want_ctor) {
			int ret;

			ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array);
			if (!ret) {
				fail = true;
			} else {
				int i;

				for (i = 0; i < ret; i++)
					fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero);
				kmem_cache_free_bulk(c, ret, bulk_array);
			}
		}

		buf = kmem_cache_alloc(c, alloc_mask);
		if (!buf) {
			fail = true;
			break;
		}
		/* Check that buf is zeroed, if it must be. */
		fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
		fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);

		if (!want_rcu) {
			kmem_cache_free(c, buf);
			continue;
		}

		/*
		 * If this is an RCU cache, use a critical section to ensure we
		 * can touch objects after they're freed.
		 */
		rcu_read_lock();
		/*
		 * Copy the buffer to check that it's not wiped on
		 * free().
		 */
		buf_copy = kmalloc(size, GFP_ATOMIC);
		if (buf_copy)
			memcpy(buf_copy, buf, size);

		kmem_cache_free(c, buf);
		/*
		 * Check that |buf| is intact after kmem_cache_free().
		 * |want_zero| is false, because we wrote garbage to
		 * the buffer already.
		 */
		fail |= check_buf(buf, size, want_ctor, want_rcu,
				  false);
		if (buf_copy) {
			fail |= (bool)memcmp(buf, buf_copy, size);
			kfree(buf_copy);
		}
		rcu_read_unlock();
	}
	kmem_cache_destroy(c);

	*total_failures += fail;
	return 1;
}

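/*
 * SLAB_TYPESAFE_BY_RCU caches return an object's memory to the page
 * allocator only after an RCU grace period, so a freed object may be
 * reused for a new allocation but remains safe to access under
 * rcu_read_lock().
 */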
/*
 * Check that the data written to an RCU-allocated object survives
 * reallocation.
 */
static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
{
	struct kmem_cache *c;
	void *buf, *buf_contents, *saved_ptr;
	void **used_objects;
	int i, iter, maxiter = 1024;
	bool fail = false;

	c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
			      NULL);
	buf = kmem_cache_alloc(c, GFP_KERNEL);
	saved_ptr = buf;
	fill_with_garbage(buf, size);
	buf_contents = kmalloc(size, GFP_KERNEL);
	if (!buf_contents) {
		kmem_cache_free(c, buf);
		goto out;
	}
	used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
	if (!used_objects) {
		kmem_cache_free(c, buf);
		kfree(buf_contents);
		goto out;
	}
	memcpy(buf_contents, buf, size);
	kmem_cache_free(c, buf);
	/*
	 * Run for a fixed number of iterations. If we never hit saved_ptr,
	 * assume the test passes.
	 */
	for (iter = 0; iter < maxiter; iter++) {
		buf = kmem_cache_alloc(c, GFP_KERNEL);
		used_objects[iter] = buf;
		if (buf == saved_ptr) {
			fail = memcmp(buf_contents, buf, size);
			for (i = 0; i <= iter; i++)
				kmem_cache_free(c, used_objects[i]);
			goto free_out;
		}
	}

	/* saved_ptr was never reused; free everything we allocated. */
	for (i = 0; i < maxiter; i++)
		kmem_cache_free(c, used_objects[i]);

free_out:
	kfree(buf_contents);
	kfree(used_objects);
out:
	kmem_cache_destroy(c);
	*total_failures += fail;
	return 1;
}

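/*
 * Re-check the bulk allocation path over many iterations: objects are
 * refilled with garbage before being freed, so any reused object that
 * comes back non-zero indicates missing initialization.
 */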
static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
{
	struct kmem_cache *c;
	int i, iter, maxiter = 1024;
	int num, bytes;
	bool fail = false;
	void *objects[10];

	c = kmem_cache_create("test_cache", size, size, 0, NULL);
	for (iter = 0; (iter < maxiter) && !fail; iter++) {
		num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
					    objects);
		for (i = 0; i < num; i++) {
			bytes = count_nonzero_bytes(objects[i], size);
			if (bytes)
				fail = true;
			fill_with_garbage(objects[i], size);
		}

		if (num)
			kmem_cache_free_bulk(c, num, objects);
	}
	kmem_cache_destroy(c);
	*total_failures += fail;
	return 1;
}

/*
 * Test kmem_cache allocation by creating caches of different sizes, with and
 * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
 */
static int __init test_kmemcache(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, flags, size;
	bool ctor, rcu, zero;

	for (i = 0; i < 10; i++) {
		size = 8 << i;
		for (flags = 0; flags < 8; flags++) {
			ctor = flags & 1;
			rcu = flags & 2;
			zero = flags & 4;
			/*
			 * A constructed object must keep its pattern, so
			 * combining a constructor with __GFP_ZERO makes no
			 * sense and is skipped.
			 */
			if (ctor && zero)
				continue;
			num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
							&failures);
		}
		num_tests += do_kmem_cache_size_bulk(size, &failures);
	}
	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
static int __init test_rcu_persistent(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, size;

	for (i = 0; i < 10; i++) {
		size = 8 << i;
		num_tests += do_kmem_cache_rcu_persistent(size, &failures);
	}
	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/*
 * Run the tests. Each test function returns the number of executed tests and
 * updates |failures| with the number of failed tests.
 */
static int __init test_meminit_init(void)
{
	int failures = 0, num_tests = 0;

	num_tests += test_pages(&failures);
	num_tests += test_kvmalloc(&failures);
	num_tests += test_kmemcache(&failures);
	num_tests += test_rcu_persistent(&failures);

	if (failures == 0)
		pr_info("all %d tests passed!\n", num_tests);
	else
		pr_info("failures: %d out of %d\n", failures, num_tests);

	return failures ? -EINVAL : 0;
}
module_init(test_meminit_init);

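/*
 * The module returns -EINVAL from its init function when any test fails,
 * so loading it (e.g. with insmod or modprobe) reports an error on
 * failure; detailed results go to the kernel log via pr_info().
 */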
MODULE_LICENSE("GPL");