#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;  /* Root of this anon_vma tree */
        spinlock_t lock;        /* Serialize access to vma list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma or its page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for cleaning up the
         * anon_vma if it is the last user on release.
         */
        atomic_t refcount;

        /*
         * NOTE: the LSB of head.next is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * head must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB itself
         * is serialized by a system-wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct list_head head;  /* Chain of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct list_head same_anon_vma; /* locked by anon_vma->lock */
};
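
/*
 * Illustrative sketch (not part of the original header): visiting every
 * vma associated with an anon_vma by walking the same_anon_vma chains,
 * in the style of try_to_unmap_anon() in mm/rmap.c. The root anon_vma
 * lock must be held across the walk (see anon_vma_lock() below):
 *
 *      struct anon_vma_chain *avc;
 *
 *      anon_vma_lock(anon_vma);
 *      list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 *              struct vm_area_struct *vma = avc->vma;
 *              ... examine vma ...
 *      }
 *      anon_vma_unlock(anon_vma);
 */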

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}
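
/*
 * Illustrative sketch of the refcount contract above: a caller that may
 * sleep pins the anon_vma first, e.g. via page_get_anon_vma() (declared
 * below), and drops the pin with put_anon_vma(), which frees the
 * structure on the last reference:
 *
 *      struct anon_vma *anon_vma = page_get_anon_vma(page);
 *
 *      if (anon_vma) {
 *              ... the anon_vma cannot be freed here ...
 *              put_anon_vma(anon_vma);
 *      }
 */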

static inline struct anon_vma *page_anon_vma(struct page *page)
{
        if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
                                            PAGE_MAPPING_ANON)
                return NULL;
        return page_rmapping(page);
}
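
/*
 * Illustrative note on the check above: for an anonymous page,
 * page->mapping stores the anon_vma pointer with PAGE_MAPPING_ANON
 * (the low bit) set when the page is first mapped (see mm/rmap.c):
 *
 *      page->mapping = (struct address_space *)
 *                      ((unsigned long)anon_vma | PAGE_MAPPING_ANON);
 *
 * Testing against PAGE_MAPPING_FLAGS rather than PAGE_MAPPING_ANON
 * alone also filters out KSM pages, which set both low bits.
 */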

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_lock(&anon_vma->root->lock);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_unlock(&anon_vma->root->lock);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
        spin_lock(&anon_vma->root->lock);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
        spin_unlock(&anon_vma->root->lock);
}
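
/*
 * Illustrative note (an inference from the helpers above): all four
 * take anon_vma->root->lock, so locking any anon_vma serializes the
 * whole fork tree that shares that root:
 *
 *      anon_vma_lock(vma->anon_vma);
 *      ... vma->anon_vma->head, and the list of every related
 *          anon_vma sharing the same root, are stable here ...
 *      anon_vma_unlock(vma->anon_vma);
 */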

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON(vma->anon_vma != next->anon_vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}
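
/*
 * Illustrative note: _mapcount starts at -1 for an unmapped page, so
 * the first page_add_*_rmap() brings it to 0 and page_mapped() becomes
 * true. page_dup_rmap() is the fork-time fast path: the parent already
 * holds a mapping, so copying a pte only needs the bare increment.
 */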

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                    struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
                        unsigned long address, unsigned int *mapcount,
                        unsigned long *vm_flags);

enum ttu_flags {
        TTU_UNMAP = 0,                  /* unmap mode */
        TTU_MIGRATION = 1,              /* migration mode */
        TTU_MUNLOCK = 2,                /* munlock mode */
        TTU_ACTION_MASK = 0xff,

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
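
/*
 * Illustrative sketch: the low byte selects one action and the high
 * bits modify it. Page migration, for instance, combines:
 *
 *      try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK |
 *                         TTU_IGNORE_ACCESS);
 *
 * while the action is recovered with TTU_ACTION(flags) inside
 * try_to_unmap_one().
 */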

bool is_vma_temporary_stack(struct vm_area_struct *vma);

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
                     unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
                            unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                        unsigned long address,
                                        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;

        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}
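
/*
 * Illustrative sketch of the calling convention, mirroring the callers
 * in mm/rmap.c: on success the returned pte is mapped and *ptlp points
 * to the held pte lock, so the caller must pte_unmap_unlock() when done:
 *
 *      spinlock_t *ptl;
 *      pte_t *pte;
 *
 *      pte = page_check_address(page, mm, address, &ptl, 0);
 *      if (!pte)
 *              return SWAP_AGAIN;
 *      ... inspect or modify *pte ...
 *      pte_unmap_unlock(pte, ptl);
 */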

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be read-only, write-protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *__page_lock_anon_vma(struct page *page);

static inline struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma;

        __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));

        /* (void) is needed to make gcc happy */
        (void) __cond_lock(&anon_vma->root->lock, anon_vma);

        return anon_vma;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma);
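
/*
 * Illustrative sketch of the lock/unlock pairing: on success the root
 * anon_vma lock is held and must be dropped with page_unlock_anon_vma():
 *
 *      struct anon_vma *anon_vma = page_lock_anon_vma(page);
 *
 *      if (anon_vma) {
 *              ... walk anon_vma->head under the lock ...
 *              page_unlock_anon_vma(anon_vma);
 *      }
 */
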
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
              struct vm_area_struct *, unsigned long, void *), void *arg);
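
/*
 * Illustrative sketch: a minimal rmap_one callback in the style of
 * remove_migration_pte() in mm/migrate.c (the name my_rmap_one is
 * hypothetical). Returning SWAP_AGAIN keeps the walk going:
 *
 *      static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *                             unsigned long addr, void *arg)
 *      {
 *              ... act on the mapping of page at addr in vma ...
 *              return SWAP_AGAIN;
 *      }
 *
 *      rmap_walk(page, my_rmap_one, NULL);
 */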

#else   /* !CONFIG_MMU */

#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *cnt,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}


#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3
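
/*
 * Illustrative sketch: vmscan-style handling of the codes above
 * (compare shrink_page_list() in mm/vmscan.c):
 *
 *      switch (try_to_unmap(page, TTU_UNMAP)) {
 *      case SWAP_FAIL:
 *              ... give up on this page ...
 *      case SWAP_AGAIN:
 *              ... retry the page later ...
 *      case SWAP_MLOCK:
 *              ... page is mlocked, cull it ...
 *      case SWAP_SUCCESS:
 *              ... all ptes removed, proceed to pageout ...
 *      }
 */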

#endif  /* _LINUX_RMAP_H */