// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"
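
/*
 * Allocate @size bytes via kmalloc(). If the allocation fails and the
 * caller did not pass KM_MAYFAIL, retry forever, logging a possible
 * allocation deadlock every 100 attempts and briefly waiting out block
 * device congestion between tries.
 */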
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
        void    *ptr;

        trace_kmem_alloc(size, flags, _RET_IP_);

        do {
                ptr = kmalloc(size, lflags);
                if (ptr || (flags & KM_MAYFAIL))
                        return ptr;
                if (!(++retries % 100))
                        xfs_err(NULL,
        "%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
                                current->comm, current->pid,
                                (unsigned int)size, __func__, lflags);
                congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (1);
}
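
/*
 * Usage sketch (hypothetical caller, not part of this file): callers that
 * can degrade gracefully pass KM_MAYFAIL and must handle a NULL return;
 * all other callers are guaranteed a non-NULL pointer by the retry loop
 * above:
 *
 *      buf = kmem_alloc(len, KM_MAYFAIL);
 *      if (!buf)
 *              return -ENOMEM;
 */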

/*
 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
 * we need to tell memory reclaim that we are in such a context via
 * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
 * and potentially deadlocking.
 */
static void *
__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
        unsigned nofs_flag = 0;
        void    *ptr;
        gfp_t   lflags = kmem_flags_convert(flags);

        if (flags & KM_NOFS)
                nofs_flag = memalloc_nofs_save();

        ptr = __vmalloc(size, lflags);

        if (flags & KM_NOFS)
                memalloc_nofs_restore(nofs_flag);

        return ptr;
}

/*
 * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
 * to the @align_mask. We only guarantee alignment up to page size; we'll clamp
 * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
 * aligned region.
 */
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
        void    *ptr;

        trace_kmem_alloc_io(size, flags, _RET_IP_);

        if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
                align_mask = PAGE_SIZE - 1;

        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
        if (ptr) {
                if (!((uintptr_t)ptr & align_mask))
                        return ptr;
                kfree(ptr);
        }
        return __kmem_vmalloc(size, flags);
}
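
/*
 * Usage sketch (hypothetical caller, not part of this file): allocating an
 * I/O buffer for a device that requires 512-byte alignment, so align_mask
 * is one less than the required alignment:
 *
 *      buf = kmem_alloc_io(size, 511, KM_MAYFAIL);
 *      if (!buf)
 *              return -ENOMEM;
 */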
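
/*
 * Allocate @size bytes, trying a physically contiguous kmalloc()-based
 * allocation first and falling back to vmalloc() if contiguous memory is
 * unavailable.
 */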
void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
        void    *ptr;

        trace_kmem_alloc_large(size, flags, _RET_IP_);

        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
        if (ptr)
                return ptr;
        return __kmem_vmalloc(size, flags);
}