#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	struct list_head head;	/* List of private "related" vmas */
};
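
/*
 * Illustrative sketch, not part of the original header: a minimal walk
 * over the vmas sharing an anon_vma (each of which may map pages that
 * point at it), assuming the anon_vma_node linkage that mm/rmap.c uses
 * in vm_area_struct; example_count_vmas is a hypothetical name.
 *
 *	static int example_count_vmas(struct anon_vma *anon_vma)
 *	{
 *		struct vm_area_struct *vma;
 *		int nr = 0;
 *
 *		spin_lock(&anon_vma->lock);
 *		list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
 *			nr++;
 *		spin_unlock(&anon_vma->lock);
 *		return nr;
 *	}
 */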

#ifdef CONFIG_MMU

extern struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
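
/*
 * Usage sketch, illustrative rather than part of the original header:
 * before installing an anonymous page into a vma, fault handlers make
 * sure vma->anon_vma exists, roughly as mm/memory.c does:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 */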

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, struct vm_area_struct *);
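
/*
 * Usage sketch, illustrative only: once anon_vma_prepare() has
 * succeeded and a new page has been allocated, an anonymous fault path
 * adds the reverse mapping roughly like mm/memory.c does (the locking
 * and accounting of that caller are assumed here):
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	lru_cache_add_active(page);
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, page_table, entry);
 */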

/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page:	the page to add the mapping to
 *
 * For copy_page_range only: minimal extract from page_add_rmap,
 * avoiding unnecessary tests (already checked) so it's quicker.
 */
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}
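
/*
 * Usage sketch, illustrative only: when copy_page_range() duplicates a
 * pte into the child at fork time, the copied mapping is accounted
 * roughly like this (the surrounding checks of mm/memory.c assumed):
 *
 *	get_page(page);
 *	page_dup_rmap(page);
 */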

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked);
int try_to_unmap(struct page *, int ignore_refs);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **);
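
/*
 * Usage sketch, illustrative only: on success page_check_address()
 * returns the mapped pte with the page table lock held through *ptlp,
 * so callers release both with pte_unmap_unlock(), as the walkers in
 * mm/rmap.c do:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte) {
 *		(inspect or update the pte here)
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */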

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
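
/*
 * Usage sketch, illustrative only: a caller about to write the page
 * back can transfer dirtiness from the ptes to the page, roughly as
 * the writeback code does:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */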

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

#define page_referenced(page,l) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}


#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
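
/*
 * Usage sketch, illustrative only: mm/vmscan.c acts on these return
 * values roughly as follows; only SWAP_SUCCESS lets reclaim go on to
 * write out and free the page.
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		(could not unmap; keep the page mapped and reactivate it)
 *	case SWAP_AGAIN:
 *		(some ptes remain; keep the page and retry later)
 *	case SWAP_SUCCESS:
 *		(all ptes gone; proceed with pageout)
 *	}
 */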
137 | ||
138 | #endif /* _LINUX_RMAP_H */ |