// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

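/*
 * cmma_flag: 0 - CMMA hinting disabled, 1 - CMMA available,
 * 2 - CMMA with the additional no-dat page states (see cmma_init()).
 */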
static int cmma_flag = 1;

static int __init cmma(char *str)
{
        bool enabled;

        if (!kstrtobool(str, &enabled))
                cmma_flag = enabled;
        return 1;
}
__setup("cmma=", cmma);

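/*
 * Probe for ESSA_GET_STATE: rc is preloaded with -EOPNOTSUPP and is
 * cleared to 0 only if the ESSA instruction executes without a
 * program check (the exception table entry skips the "la" on a fault).
 */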
static inline int cmma_test_essa(void)
{
        register unsigned long tmp asm("0") = 0;
        register int rc asm("1");

        /* test ESSA_GET_STATE */
        asm volatile(
                "       .insn   rrf,0xb9ab0000,%1,%1,%2,0\n"
                "0:     la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "=&d" (rc), "+&d" (tmp)
                : "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
        return rc;
}

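/*
 * Check whether CMMA is usable: disable it if the ESSA probe fails,
 * and switch to the extended mode with the no-dat page states if
 * facility bit 147 is installed.
 */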
void __init cmma_init(void)
{
        if (!cmma_flag)
                return;
        if (cmma_test_essa()) {
                cmma_flag = 0;
                return;
        }
        if (test_facility(147))
                cmma_flag = 2;
}

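/* Extract the ESSA page state; the state is in the six low bits. */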
static inline unsigned char get_page_state(struct page *page)
{
        unsigned char state;

        asm volatile("  .insn   rrf,0xb9ab0000,%0,%1,%2,0"
                     : "=&d" (state)
                     : "a" (page_to_phys(page)),
                       "i" (ESSA_GET_STATE));
        return state & 0x3f;
}

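/*
 * Hint the 1 << order pages starting at page as unused: the host may
 * discard their content.
 */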
static inline void set_page_unused(struct page *page, int order)
{
        int i, rc;

        for (i = 0; i < (1 << order); i++)
                asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
                             : "=&d" (rc)
                             : "a" (page_to_phys(page + i)),
                               "i" (ESSA_SET_UNUSED));
}

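/* Set the pages back to the stable state; their content is preserved. */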
static inline void set_page_stable_dat(struct page *page, int order)
{
        int i, rc;

        for (i = 0; i < (1 << order); i++)
                asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
                             : "=&d" (rc)
                             : "a" (page_to_phys(page + i)),
                               "i" (ESSA_SET_STABLE));
}

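/*
 * Set the pages to the stable no-dat state: stable, with the additional
 * guarantee that the frames are not used for DAT translation tables.
 */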
static inline void set_page_stable_nodat(struct page *page, int order)
{
        int i, rc;

        for (i = 0; i < (1 << order); i++)
                asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
                             : "=&d" (rc)
                             : "a" (page_to_phys(page + i)),
                               "i" (ESSA_SET_STABLE_NODAT));
}

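/*
 * The mark_kernel_* walkers tag every page that holds a kernel page
 * table with PG_arch_1, so that cmma_init_nodat() can leave those
 * pages in a dat-capable state. Region and segment tables span four
 * pages (16K), hence all four pages of such a table are tagged.
 */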
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
        unsigned long next;
        struct page *page;
        pmd_t *pmd;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || pmd_large(*pmd))
                        continue;
                page = virt_to_page(pmd_val(*pmd));
                set_bit(PG_arch_1, &page->flags);
        } while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
        unsigned long next;
        struct page *page;
        pud_t *pud;
        int i;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none(*pud) || pud_large(*pud))
                        continue;
                if (!pud_folded(*pud)) {
                        page = virt_to_page(pud_val(*pud));
                        for (i = 0; i < 4; i++)
                                set_bit(PG_arch_1, &page[i].flags);
                }
                mark_kernel_pmd(pud, addr, next);
        } while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
        unsigned long next;
        struct page *page;
        p4d_t *p4d;
        int i;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none(*p4d))
                        continue;
                if (!p4d_folded(*p4d)) {
                        page = virt_to_page(p4d_val(*p4d));
                        for (i = 0; i < 4; i++)
                                set_bit(PG_arch_1, &page[i].flags);
                }
                mark_kernel_pud(p4d, addr, next);
        } while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
        unsigned long addr, next;
        struct page *page;
        pgd_t *pgd;
        int i;

        addr = 0;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, MODULES_END);
                if (pgd_none(*pgd))
                        continue;
                if (!pgd_folded(*pgd)) {
                        page = virt_to_page(pgd_val(*pgd));
                        for (i = 0; i < 4; i++)
                                set_bit(PG_arch_1, &page[i].flags);
                }
                mark_kernel_p4d(pgd, addr, next);
        } while (pgd++, addr = next, addr != MODULES_END);
}

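/*
 * Late init: move every kernel page that is neither a page table page
 * nor on a free list to the stable no-dat state.
 */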
void __init cmma_init_nodat(void)
{
        struct page *page;
        unsigned long start, end, ix;
        int i;

        if (cmma_flag < 2)
                return;
        /* Mark pages used in kernel page tables */
        mark_kernel_pgd();

        /* Set all kernel pages not used for page tables to stable/no-dat */
        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
                page = pfn_to_page(start);
                for (ix = start; ix < end; ix++, page++) {
                        if (__test_and_clear_bit(PG_arch_1, &page->flags))
                                continue;       /* skip page table pages */
                        if (!list_empty(&page->lru))
                                continue;       /* skip free pages */
                        set_page_stable_nodat(page, 0);
                }
        }
}

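/* Page allocator hooks: hint pages unused on free, stable on alloc. */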
void arch_free_page(struct page *page, int order)
{
        if (!cmma_flag)
                return;
        set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
        if (!cmma_flag)
                return;
        if (cmma_flag < 2)
                set_page_stable_dat(page, order);
        else
                set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
        if (!cmma_flag)
                return;
        set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
        if (cmma_flag < 2)
                return;
        set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
        unsigned char state;

        if (cmma_flag < 2)
                return 0;
        state = get_page_state(page);
        return !!(state & 0x20);
}

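/*
 * Walk the free lists of every populated zone and set each free page
 * stable or unused, depending on make_stable (used around hibernation
 * so that free pages are in a consistent state in the suspend image).
 */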
void arch_set_page_states(int make_stable)
{
        unsigned long flags, order, t;
        struct list_head *l;
        struct page *page;
        struct zone *zone;

        if (!cmma_flag)
                return;
        if (make_stable)
                drain_local_pages(NULL);
        for_each_populated_zone(zone) {
                spin_lock_irqsave(&zone->lock, flags);
                for_each_migratetype_order(order, t) {
                        list_for_each(l, &zone->free_area[order].free_list[t]) {
                                page = list_entry(l, struct page, lru);
                                if (make_stable)
                                        set_page_stable_dat(page, order);
                                else
                                        set_page_unused(page, order);
                        }
                }
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}