/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <[email protected]>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
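
/*
 * Shadow encoding used throughout this file: one shadow byte tracks
 * KASAN_SHADOW_SCALE_SIZE (8) bytes of memory. A value of 0 means the
 * whole 8-byte granule is addressable, a value of 1..7 means only the
 * first N bytes are, and negative values (KASAN_FREE_PAGE,
 * KASAN_KMALLOC_REDZONE, ...) poison the whole granule. For example,
 * kasan_unpoison_shadow(p, 13) with p granule-aligned writes 0 to the
 * first shadow byte and 5 (13 & KASAN_SHADOW_MASK) to the second.
 */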

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
	__kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
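
/*
 * Example for the check above: a shadow value of 5 means only the first
 * 5 bytes of the granule are valid, so a 1-byte access at offset
 * (addr & KASAN_SHADOW_MASK) == 6 yields 6 >= 5 and is reported, while
 * an access at offset 4 is still within bounds.
 */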

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}
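
/*
 * The helpers above read the shadow with a single wide load (u16 for
 * 2/4/8-byte accesses, u32 for 16-byte ones). If all loaded shadow
 * bytes are zero the access is definitely valid, so the common case
 * costs one load and one compare; only a non-zero result falls through
 * to the byte-granular checks.
 */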

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
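
/*
 * E.g. for a 27-byte shadow range starting 3 bytes past an 8-byte
 * boundary, memory_is_zero() checks a 5-byte prefix byte by byte, then
 * two aligned 8-byte words, then the remaining 6-byte tail; the address
 * of the first non-zero byte is returned so the caller can inspect it.
 */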

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

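/*
 * Interpose on the kernel's bulk memory primitives so that explicit
 * memset()/memmove()/memcpy() calls are range-checked as well; the
 * underlying architecture implementations stay reachable through
 * __memset(), __memmove() and __memcpy().
 */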
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

#ifdef CONFIG_SLAB
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}
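
/*
 * E.g. a 100-byte object falls into the "object_size <= 512 - 64"
 * bucket and gets a 64-byte redzone; 448 bytes is the largest object
 * that still gets 64, so object plus redzone never exceeds 512 within
 * that bucket. Larger objects get proportionally larger redzones, up
 * to 2048 bytes.
 */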

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	/* Make sure the adjusted size is still less than
	 * KMALLOC_MAX_CACHE_SIZE.
	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
	 * to skip it for SLUB when it starts using kasan_cache_create().
	 */
	if (*size > KMALLOC_MAX_CACHE_SIZE -
	    sizeof(struct kasan_alloc_meta) -
	    sizeof(struct kasan_free_meta))
		return;
	*flags |= SLAB_KASAN;
	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
			 (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;
	*size = min(KMALLOC_MAX_CACHE_SIZE,
		    max(*size,
			cache->object_size +
			optimal_redzone(cache->object_size)));
}
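
/*
 * Roughly, the resulting object layout when SLAB_KASAN is set is:
 *
 *   | object | kasan_alloc_meta | [kasan_free_meta] | redzone padding |
 *
 * alloc_meta_offset/free_meta_offset record where the metadata lives,
 * and the final min/max clamp keeps the grown size within
 * KMALLOC_MAX_CACHE_SIZE while still reserving at least the redzone
 * suggested by optimal_redzone().
 */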
#endif

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		alloc_info->state = KASAN_STATE_INIT;
	}
#endif
}

#ifdef CONFIG_SLAB
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}
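
/*
 * The trace captured by save_stack() is handed to the stack depot,
 * which deduplicates identical traces and returns a compact
 * depot_stack_handle_t, so every kasan_track stores a pid plus one
 * handle instead of a full array of return addresses.
 */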

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
#ifdef CONFIG_SLAB
	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	if (likely(cache->flags & SLAB_KASAN)) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		struct kasan_free_meta *free_info =
			get_free_info(cache, object);

		switch (alloc_info->state) {
		case KASAN_STATE_ALLOC:
			alloc_info->state = KASAN_STATE_QUARANTINE;
			quarantine_put(free_info, cache);
			set_track(&free_info->track, GFP_NOWAIT);
			kasan_poison_slab_free(cache, object);
			return true;
		case KASAN_STATE_QUARANTINE:
		case KASAN_STATE_FREE:
			pr_err("Double free\n");
			dump_stack();
			break;
		default:
			break;
		}
	}
	return false;
#else
	kasan_poison_slab_free(cache, object);
	return false;
#endif
}
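
/*
 * Object lifecycle under CONFIG_SLAB, as tracked in kasan_alloc_meta:
 *
 *   KASAN_STATE_INIT  --kasan_kmalloc()-->   KASAN_STATE_ALLOC
 *   KASAN_STATE_ALLOC --kasan_slab_free()--> KASAN_STATE_QUARANTINE
 *   KASAN_STATE_QUARANTINE --quarantine drain--> KASAN_STATE_FREE
 *
 * Returning true above tells the slab allocator not to free the object
 * yet: it sits poisoned in the quarantine so that stale pointers to it
 * keep triggering use-after-free reports for a while.
 */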

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_ALLOC;
		alloc_info->alloc_size = size;
		set_track(&alloc_info->track, flags);
	}
#endif
}
EXPORT_SYMBOL(kasan_kmalloc);
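
/*
 * E.g. kmalloc(100, GFP_KERNEL) served from the 128-byte cache: bytes
 * 0..99 are made accessible, the shadow byte covering bytes 96..103
 * gets the partial value 4 (100 & KASAN_SHADOW_MASK), and bytes
 * 104..127 are poisoned with KASAN_KMALLOC_REDZONE. An out-of-bounds
 * read at offset 100 or 110 therefore hits poisoned shadow and is
 * reported, even though the whole 128-byte slab object belongs to the
 * allocation.
 */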

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
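
/*
 * Module text/data lives in the vmalloc-backed module area, which has
 * no shadow populated at boot, so kasan_module_alloc() backs the
 * module's address range with real shadow pages on demand (zeroed,
 * i.e. unpoisoned). The VM_KASAN flag lets kasan_free_shadow() below
 * find and vfree() this shadow again when the module mapping goes away.
 */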

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
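
/*
 * These are the entry points emitted by the compiler in outline
 * instrumentation mode (CONFIG_KASAN_OUTLINE): roughly, a statement
 * like "*(u32 *)p = v;" is compiled as a call to __asan_store4(p)
 * followed by the store itself. The *_noabort aliases exist because
 * newer compiler versions emit the _noabort names for kernel code.
 */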

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif