/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <[email protected]>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
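
/*
 * Worked example (illustrative comment, not from the original source):
 * with KASAN_SHADOW_SCALE_SIZE == 8 and an 8-byte-aligned p,
 * kasan_unpoison_shadow(p, 13) zeroes one full shadow byte (covering
 * p[0..7]) and then stores 13 & KASAN_SHADOW_MASK == 5 into the next
 * shadow byte, so only the first 5 bytes of that granule are marked
 * accessible.
 */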

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
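
/*
 * Illustrative note (not from the original source): a shadow value of 0
 * means the whole 8-byte granule is accessible, a value N in 1..7 means
 * only its first N bytes are, and negative values are poison markers.
 * E.g. with shadow value 5, a 1-byte access at granule offset 4 passes
 * (4 < 5) while one at offset 5 is reported.
 */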

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we
		 * don't need to do anything more. Otherwise, test the
		 * first shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we
		 * don't need to do anything more. Otherwise, test the
		 * first shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we
		 * don't need to do anything more. Otherwise, test the
		 * first shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}
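
/*
 * Note (illustrative, not from the original source): each helper above
 * first loads all shadow bytes the access could touch with a single
 * wide read (u16 or u32); only when that word is non-zero does it fall
 * back to examining individual bytes, so the fast path for unpoisoned
 * memory is one load and one compare.
 */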

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
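
/*
 * Sketch of the scan above (explanatory, not from the original source):
 * memory_is_zero() checks a few leading bytes until the pointer is
 * 8-byte aligned, then compares whole u64 words, then the tail bytes.
 * For a 64-byte shadow range (a 512-byte access) that is at most 8 word
 * compares instead of 64 byte compares.
 */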

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}
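
/*
 * Clarifying note (not from the original source): a non-zero byte found
 * by memory_is_zero() is only legitimate when it is the very last
 * shadow byte of the range and its value still covers the final byte
 * of the access; anything else means part of the range is poisoned.
 */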

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
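
/*
 * Note (illustrative, not from the original source): for the
 * constant-size accesses the compiler instruments (1, 2, 4, 8 or 16
 * bytes), __builtin_constant_p(size) is true and the switch collapses
 * at compile time to a direct call to the matching specialized check;
 * BUILD_BUG() rejects any other constant size.
 */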

static __always_inline void check_memory_region(unsigned long addr,
						size_t size, bool write)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, _RET_IP_);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, _RET_IP_);
}
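
/*
 * Note (illustrative, not from the original source): the range check
 * above catches accesses below the start of the shadowed address space,
 * e.g. wild or NULL-page dereferences, which have no shadow bytes to
 * consult and are therefore reported immediately.
 */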

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	__asan_storeN((unsigned long)addr, len);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memcpy(dest, src, len);
}
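
/*
 * Note (assumption, not from the original source): __memset, __memmove
 * and __memcpy are expected to be the architecture's uninstrumented
 * implementations, so each interceptor validates the full source and/or
 * destination range once and then delegates the actual operation.
 */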

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
	kasan_kmalloc(cache, object, cache->object_size);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
}
EXPORT_SYMBOL(kasan_kmalloc);
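
/*
 * Worked example (illustrative, not from the original source): for a
 * hypothetical cache with object_size == 96 and a kmalloc size of 50,
 * redzone_start = round_up(object + 50, 8) = object + 56 and
 * redzone_end = object + 96. Bytes [0, 50) are unpoisoned (the shadow
 * byte for bytes 48..55 holds the partial value 2) and bytes [56, 96)
 * are poisoned with KASAN_KMALLOC_REDZONE.
 */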

void kasan_kmalloc_large(const void *ptr, size_t size)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size);
	else
		kasan_kmalloc(page->slab_cache, object, size);
}

void kasan_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		return 0;
	}

	return -ENOMEM;
}
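
/*
 * Worked example (illustrative, not from the original source): each
 * shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT == 8 bytes of memory,
 * so a 1 MB module mapping needs size >> 3 == 128 KB of shadow, which
 * round_up() pads to whole pages before the vmalloc allocation.
 */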

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}
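
/*
 * Note (illustrative, not from the original source): the compiler
 * places each instrumented global inside a larger region of
 * size_with_redzone bytes; the object itself is unpoisoned and the
 * trailing [aligned_size, size_with_redzone) slice is poisoned as
 * KASAN_GLOBAL_REDZONE to catch out-of-bounds accesses.
 */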

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, false);		\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, true);		\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
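
/*
 * Expansion sketch (illustrative, not from the original source):
 * DEFINE_ASAN_LOAD_STORE(1) produces __asan_load1()/__asan_store1(),
 * which instrumented code calls before every 1-byte load and store,
 * plus __asan_load1_noabort()/__asan_store1_noabort() aliases that some
 * compiler versions emit instead; all four symbols are exported so
 * instrumented modules can link against them.
 */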

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif