/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)
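
/*
 * Illustrative note: the __bitwise annotation lets sparse catch accidental
 * mixing of xfs_km_flags_t with gfp_t values.  Callers combine the flags
 * with a plain bitwise OR, e.g. KM_NOFS | KM_MAYFAIL, and pass the result
 * to the kmem_* allocation helpers declared further down in this header.
 */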

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations.  We can override this by passing
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	if (flags & KM_NOLOCKDEP)
		lflags |= __GFP_NOLOCKDEP;

	return lflags;
}
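
/*
 * Illustrative mapping, derived from the conversion above (a sketch,
 * not an exhaustive table):
 *
 *	kmem_flags_convert(0)          -> GFP_KERNEL | __GFP_NOWARN
 *	kmem_flags_convert(KM_NOFS)    -> (GFP_KERNEL | __GFP_NOWARN) & ~__GFP_FS
 *	kmem_flags_convert(KM_MAYFAIL) -> GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL
 *	kmem_flags_convert(KM_ZERO)    -> GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO
 */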

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}


static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}
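
/*
 * Usage sketch (illustrative; struct xfs_foo is a hypothetical example):
 * a caller inside a transaction that must not recurse into the
 * filesystem, and that can cope with allocation failure, might do:
 *
 *	struct xfs_foo	*foo;
 *
 *	foo = kmem_zalloc(sizeof(*foo), KM_NOFS | KM_MAYFAIL);
 *	if (!foo)
 *		return -ENOMEM;
 *	...
 *	kmem_free(foo);
 *
 * kmem_free() is built on kvfree(), so it accepts both kmalloc- and
 * vmalloc-backed buffers.
 */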

/*
 * Zone interfaces
 */

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache
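
/*
 * Usage sketch (illustrative; the zone name and structure are
 * hypothetical): with the aliases above, a "zone" is simply a slab
 * cache, so the standard slab API applies:
 *
 *	kmem_zone_t	*xfs_foo_zone;
 *
 *	xfs_foo_zone = kmem_cache_create("xfs_foo", sizeof(struct xfs_foo),
 *					 0, 0, NULL);
 *	foo = kmem_cache_zalloc(xfs_foo_zone, GFP_NOFS);
 */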

static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
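
/*
 * Usage sketch (illustrative): kmem_to_page() resolves the backing page
 * whether a buffer is kmalloc- or vmalloc-backed, so callers need not
 * track how the memory was obtained:
 *
 *	void		*buf = kmem_alloc_large(len, KM_NOFS);
 *	struct page	*page = kmem_to_page(buf);
 *
 * (That kmem_alloc_large() may return vmalloc memory is an assumption
 * here; kmem_free()'s reliance on kvfree() points the same way.  A bare
 * virt_to_page() would be wrong for a vmalloc'd address.)
 */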

#endif	/* __XFS_SUPPORT_KMEM_H__ */