#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
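
/*
 * Hypothetical usage sketch (not part of the upstream API): a subsystem that
 * caches an mm pointer for an unbounded time pins the mm_struct itself with
 * mmgrab() and drops it again with mmdrop(). The struct and function names
 * below are made up for illustration only.
 */
struct example_mm_holder {
	struct mm_struct *mm;
};

static inline void example_mm_holder_init(struct example_mm_holder *h,
					  struct mm_struct *mm)
{
	mmgrab(mm);	/* keep the mm_struct (not the address space) alive */
	h->mm = mm;
}

static inline void example_mm_holder_release(struct example_mm_holder *h)
{
	mmdrop(h->mm);	/* may free the mm_struct and its page tables */
	h->mm = NULL;
}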

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
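
/*
 * Hypothetical usage sketch (illustration only): a caller that only holds an
 * mm_count reference must take an mm_users reference with mmget_not_zero()
 * before touching the address space, and balance it with mmput().
 */
static inline bool example_access_address_space(struct mm_struct *mm)
{
	if (!mmget_not_zero(mm))
		return false;	/* address space already torn down */

	/* ... safe to operate on the address space here ... */

	mmput(mm);
	return true;
}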

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
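
/*
 * Hypothetical usage sketch (illustration only): a successful (non-NULL,
 * non-error) return from get_task_mm() or mm_access() comes with an mm_users
 * reference held and must be balanced by mmput().
 */
static inline bool example_task_has_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return false;	/* kernel thread or task already exiting */

	/* ... inspect the mm under our own reference ... */

	mmput(mm);
	return true;
}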

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
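
/*
 * Hypothetical usage sketch (illustration only): a heuristic (for instance
 * an OOM scoring path) may want to treat a vfork()ed child specially, since
 * it is only borrowing its parent's mm until it execs or exits.
 */
static inline bool example_skip_vforked_child(struct task_struct *tsk)
{
	/* account the shared mm to the parent rather than to this task */
	return in_vfork(tsk);
}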

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
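
/*
 * Hypothetical usage sketch (illustration only): an allocation path filters
 * the caller's gfp mask through current_gfp_context() so that a
 * memalloc_noio/memalloc_nofs scope set higher up the call chain is honoured.
 */
static inline struct page *example_alloc_page_scoped(gfp_t gfp)
{
	gfp = current_gfp_context(gfp);	/* may clear __GFP_IO and/or __GFP_FS */
	return alloc_page(gfp);
}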

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
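
/*
 * Hypothetical usage sketch (illustration only): a path that may allocate
 * memory, and could therefore recurse into fs reclaim, can annotate that
 * fact for lockdep by taking and releasing the fs_reclaim "lock" around it.
 */
static inline void example_annotate_might_alloc(gfp_t gfp)
{
	fs_reclaim_acquire(gfp);	/* lockdep: this context may enter reclaim */
	fs_reclaim_release(gfp);
}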

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
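
/*
 * Hypothetical usage sketch (illustration only): code that must not recurse
 * into I/O (e.g. on a block device reconfiguration path) brackets the region
 * with the save/restore pair; allocations inside are then treated as if they
 * used GFP_NOIO.
 */
static inline struct page *example_alloc_page_noio_scope(void)
{
	unsigned int noio_flags = memalloc_noio_save();
	struct page *page = alloc_page(GFP_KERNEL);	/* implicitly GFP_NOIO */

	memalloc_noio_restore(noio_flags);
	return page;
}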

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
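
/*
 * Hypothetical usage sketch (illustration only): a filesystem holding locks
 * that its own reclaim path might need (e.g. inside a transaction) marks the
 * region NOFS so allocations cannot recurse back into the filesystem.
 */
static inline struct page *example_alloc_page_nofs_scope(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();
	struct page *page = alloc_page(GFP_KERNEL);	/* implicitly GFP_NOFS */

	memalloc_nofs_restore(nofs_flags);
	return page;
}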

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
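
/*
 * Hypothetical usage sketch (illustration only): a path that itself makes
 * forward progress for reclaim (e.g. swap-out or writeback helpers) can set
 * PF_MEMALLOC so its own small allocations skip direct reclaim and may dip
 * into the memory reserves. Use sparingly: exhausting the reserves can
 * deadlock the system.
 */
static inline struct page *example_alloc_page_noreclaim_scope(void)
{
	unsigned int noreclaim_flags = memalloc_noreclaim_save();
	struct page *page = alloc_page(GFP_KERNEL);

	memalloc_noreclaim_restore(noreclaim_flags);
	return page;
}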

#endif /* _LINUX_SCHED_MM_H */