#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
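/*
 * Example (a hedged sketch, not part of this header): the kernel reaches
 * ksm_madvise() through the madvise(2) syscall.  Marking an anonymous
 * mapping MADV_MERGEABLE asks ksmd to scan it for identical pages; "len"
 * is assumed to be a page-aligned size chosen by the caller:
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE) != 0)
 *		perror("madvise");
 *
 * MADV_UNMERGEABLE undoes the registration.
 */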
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

/*
 * Carry KSM tracking across fork(): if the parent mm was registered with
 * ksmd (MMF_VM_MERGEABLE set), register the child mm too.
 */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

/* Unregister an exiting mm from ksmd, if it was ever registered. */
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

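/*
 * Illustration (a minimal userspace sketch; the flag values below are
 * assumptions standing in for PAGE_MAPPING_ANON and PAGE_MAPPING_KSM from
 * linux/mm.h): because struct stable_node is word-aligned, its low two
 * bits are free, so one page->mapping word serves as both a type tag and
 * a stable-tree link:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define MAPPING_ANON	1UL
 *	#define MAPPING_KSM	2UL
 *	#define MAPPING_FLAGS	(MAPPING_ANON | MAPPING_KSM)
 *
 *	struct node { int dummy; };	// stands in for struct stable_node
 *
 *	int main(void)
 *	{
 *		static struct node n;	// aligned, so low bits are free
 *		uintptr_t mapping = (uintptr_t)&n | MAPPING_ANON | MAPPING_KSM;
 *
 *		// the "PageKsm" test: both tag bits set at once
 *		assert((mapping & MAPPING_FLAGS) ==
 *		       (MAPPING_ANON | MAPPING_KSM));
 *		// the "page_stable_node" recovery: mask the tag back off
 *		assert((struct node *)(mapping & ~MAPPING_FLAGS) == &n);
 *		return 0;
 *	}
 */
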
/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = page_anon_vma(page);

	if (!anon_vma ||
	    (anon_vma->root == vma->anon_vma->root &&
	     page->index == linear_page_index(vma, address)))
		return page;

	return ksm_does_need_to_copy(page, vma, address);
}
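
/*
 * Caller pattern (a hedged sketch of the do_swap_page() side; surrounding
 * locking and error handling elided): the swapped-in page is filtered
 * through ksm_might_need_to_copy() before being mapped, and the caller
 * must continue with whichever page comes back:
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *	// "page" may now be a freshly allocated copy of the old KSM
 *	// page; map and account this one, not the original.
 */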

int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

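/*
 * Example (a hypothetical callback, for illustration only): rmap_walk_ksm()
 * invokes rmap_one() for each vma that still maps the KSM page, stopping
 * early if the callback returns something other than SWAP_AGAIN:
 *
 *	static int dump_one(struct page *page, struct vm_area_struct *vma,
 *			    unsigned long address, void *arg)
 *	{
 *		pr_info("page %p mapped at %#lx\n", page, address);
 *		return SWAP_AGAIN;	// keep walking
 *	}
 *
 *	rmap_walk_ksm(page, dump_one, NULL);
 */
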
#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */