#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
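
/*
 * Illustrative userspace sketch (not part of this header; assumes the
 * mbind(2) wrapper from libnuma's <numaif.h>): bind a mapping to node 0
 * and ask for existing pages to be moved, failing if they cannot be.
 *
 *	unsigned long nodemask = 1;	// bit 0 set: node 0 only
 *	mbind(addr, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *	      MPOL_MF_STRICT | MPOL_MF_MOVE);
 */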

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;			/* See MPOL_* above */
	union {
		struct zonelist *zonelist;	/* bind */
		short preferred_node;		/* preferred */
		nodemask_t nodes;		/* interleave */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};
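
/*
 * Illustrative sketch (hypothetical helper, not in this header): how a
 * consumer might dispatch on ->policy to read the matching union member.
 *
 *	static int pick_node(struct mempolicy *pol)
 *	{
 *		switch (pol->policy) {
 *		case MPOL_PREFERRED:
 *			return pol->v.preferred_node;
 *		case MPOL_INTERLEAVE:
 *			return first_node(pol->v.nodes);
 *		case MPOL_BIND:
 *			return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
 *		default:	// MPOL_DEFAULT: fall back to the local node
 *			return numa_node_id();
 *		}
 *	}
 */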

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
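
/*
 * Illustrative sketch (hedged): the usual reference-count lifecycle with
 * these helpers. A caller that keeps a policy pins it with mpol_get() and
 * drops it with mpol_free(), which destroys the object only when the last
 * reference goes away.
 *
 *	struct mempolicy *pol = current->mempolicy;
 *
 *	mpol_get(pol);			// pin while we use it
 *	// ... allocate according to *pol ...
 *	mpol_free(pol);			// drop our reference
 */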

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages,
 * not bytes, so that we can work with shared memory segments larger than
 * an unsigned long worth of bytes.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

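/*
 * Illustrative sketch (hypothetical helper, not in this header): a shared
 * memory backend would look up the policy stored for a page index and fall
 * back to the task policy when no range covers that offset.
 *
 *	static struct mempolicy *policy_for_index(struct shared_policy *sp,
 *						  unsigned long pgoff)
 *	{
 *		struct mempolicy *pol = mpol_shared_policy_lookup(sp, pgoff);
 *
 *		return pol ? pol : current->mempolicy;
 *	}
 */
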
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSET
#define current_cpuset_is_being_rebound() \
		(cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif

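/*
 * Illustrative sketch (hedged): when a cpuset's mems_allowed changes, the
 * cpuset code remaps each affected task's policies to the new node set,
 * roughly along the lines of:
 *
 *	mpol_rebind_task(tsk, &new_mems);
 *	mpol_rebind_mm(tsk->mm, &new_mems);
 */
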
extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

extern int policy_zone;

static inline void check_highest_zone(int k)
{
	if (k > policy_zone)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

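/*
 * Hedged note: do_migrate_pages() attempts to move pages of @mm that sit
 * on @from_nodes over to @to_nodes; a caller like sys_migrate_pages() is
 * expected to pass MPOL_MF_MOVE or MPOL_MF_MOVE_ALL in @flags.
 */
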
extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
					int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_policy(struct mempolicy *pol,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes,
	const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif