#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs which point to this anon_vma.
	 *
	 * This counter is used for making decisions about reusing an anon_vma
	 * instead of forking a new one. See the comments in anon_vma_clone().
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB itself is
	 * serialized by a system-wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct rb_root rb_root;	/* Interval tree of private "related" vmas */
};
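
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * how a reverse-map walker gets from an anonymous page back to its
 * anon_vma.  The pointer lives in page->mapping, tagged in its low
 * bits with PAGE_MAPPING_ANON; see page_anon_vma() in mm/util.c for
 * the real decoding (which also handles compound tail pages via
 * compound_head()).
 */
#if 0
static struct anon_vma *example_page_anon_vma(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;	/* file or KSM page, not a plain anon page */
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}
#endif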

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};
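
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * the same anon_vma_chain is reachable from both sides of the
 * many-to-many mapping.  do_something() and do_something_else() are
 * hypothetical callbacks; anon_vma_interval_tree_foreach() is the
 * interval-tree iterator declared in linux/mm.h.
 */
#if 0
struct anon_vma_chain *avc;
pgoff_t first = 0, last = ULONG_MAX;	/* pgoff range of interest */

/* From a VMA: every anon_vma it is linked to, via the same_vma list. */
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
	do_something(avc->anon_vma);

/* From an anon_vma: every VMA overlapping [first, last], via the rb tree. */
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, first, last)
	do_something_else(avc->vma);
#endif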

enum ttu_flags {
	TTU_UNMAP = 1,			/* unmap mode */
	TTU_MIGRATION = 2,		/* migration mode */
	TTU_MUNLOCK = 4,		/* munlock mode */
	TTU_LZFREE = 8,			/* lazy free mode */
	TTU_SPLIT_HUGE_PMD = 16,	/* split huge PMD if any */

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
	TTU_RMAP_LOCKED = (1 << 12)	/* do not grab rmap lock:
					 * caller holds it */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
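
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * the typical refcount pattern, as used e.g. by page migration.
 * page_get_anon_vma() (declared below) takes the reference; the last
 * put frees the anon_vma via __put_anon_vma().
 */
#if 0
struct anon_vma *anon_vma = page_get_anon_vma(page);

if (anon_vma) {
	/* the anon_vma cannot be freed while we hold the reference */
	put_anon_vma(anon_vma);
}
#endif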

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
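
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * all lock/unlock helpers operate on the root of the anon_vma tree,
 * so a single rwsem serializes a whole fork-related group.  Readers
 * walking the interval tree take the read side; anyone modifying the
 * tree takes the write side.
 */
#if 0
anon_vma_lock_read(anon_vma);
/* ... walk anon_vma->rb_root, e.g. with anon_vma_interval_tree_foreach() ... */
anon_vma_unlock_read(anon_vma);
#endif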

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

/* Duplicate an existing mapping: bump the appropriate mapcount. */
static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
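
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * modifier bits are OR-ed onto an unmap mode.  This combination is the
 * one page migration uses; the SWAP_* return values are defined at the
 * end of this header.
 */
#if 0
switch (try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK |
			   TTU_IGNORE_ACCESS)) {
case SWAP_SUCCESS:
	/* every pte now holds a migration entry */
	break;
case SWAP_AGAIN:
	/* transient failure: retry later */
	break;
default:
	/* SWAP_FAIL or SWAP_MLOCK: leave the page mapped */
	break;
}
#endif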

/*
 * Used by uprobes to replace a userspace page safely
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
			    unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
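
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * on success page_check_address() returns with the pte mapped and the
 * page table lock held, so the caller must drop both.
 */
#if 0
spinlock_t *ptl;
pte_t *pte = page_check_address(page, mm, address, &ptl, 0);

if (pte) {
	/* ... inspect or modify the pte under the ptl ... */
	pte_unmap_unlock(pte, ptl);
}
#endif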

/*
 * Used by idle page tracking to check if a page was referenced via page
 * tables.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
				  unsigned long address, pmd_t **pmdp,
				  pte_t **ptep, spinlock_t **ptlp);
#else
static inline bool page_check_address_transhuge(struct page *page,
				struct mm_struct *mm, unsigned long address,
				pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
{
	*ptep = page_check_address(page, mm, address, ptlp, 0);
	*pmdp = NULL;
	return !!*ptep;
}
#endif

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
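
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * writeback uses the count of cleaned ptes to decide whether the page
 * must still be considered dirty; compare clear_page_dirty_for_io()
 * in mm/page-writeback.c.
 */
#if 0
if (page_mkclean(page))
	set_page_dirty(page);
#endif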

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: to control an rmap walk for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking the termination condition of the walk
 * anon_lock: for taking the anon_vma lock in an optimized way rather
 *            than the default
 * invalid_vma: for skipping uninteresting vmas
 */
struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
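
/*
 * Illustrative sketch (not part of the original header, not compiled):
 * a minimal walk that counts the vmas mapping a page.  my_rmap_one()
 * and count_page_mappings() are hypothetical; returning SWAP_AGAIN
 * from rmap_one keeps the walk going.
 */
#if 0
static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
		       unsigned long addr, void *arg)
{
	int *count = arg;

	(*count)++;
	return SWAP_AGAIN;	/* continue with the next vma */
}

static int count_page_mappings(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.arg = &count,
		.rmap_one = my_rmap_one,
	};

	rmap_walk(page, &rwc);
	return count;
}
#endif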

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
#define SWAP_LZFREE	4

#endif	/* _LINUX_RMAP_H */