/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

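/*
 * Typical usage (e.g. by memory offlining or alloc_contig_range): a caller
 * first marks a pageblock-aligned range MIGRATE_ISOLATE with
 * start_isolate_page_range(), migrates or frees the pages it needs to empty,
 * verifies the result with test_pages_isolated(), and finally restores the
 * original migratetype with undo_isolate_page_range().
 */
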
/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
	if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
		return;

	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
	struct zone *zone = page_zone(page);
	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
		return;

	BUG_ON(zone->nr_pageblock_isolate <= 0);
	set_pageblock_migratetype(page, migratetype);
	zone->nr_pageblock_isolate--;
}

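/*
 * Mark a single pageblock MIGRATE_ISOLATE.  Consults the memory isolation
 * notifier chain and has_unmovable_pages() to decide whether isolation is
 * possible; on success the pageblock's free pages are moved to the
 * MIGRATE_ISOLATE freelist and the per-cpu page lists are drained.
 * Returns 0 on success, -EBUSY if the pageblock cannot be isolated.
 */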
int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or are on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * Immobile means "not-on-LRU" pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_isolate(page);
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

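/*
 * Undo set_migratetype_isolate(): move the pageblock's free pages back to
 * the freelist for @migratetype and restore the pageblock's migratetype.
 * Safe to call on a pageblock that is not currently MIGRATE_ISOLATE.
 */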
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	nr_pages = move_freepages_block(zone, page, migratetype);
	__mod_zone_freepage_state(zone, nr_pages, migratetype);
	restore_pageblock_isolate(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

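/*
 * Return the first valid page in the first nr_pages pfns starting at @pfn,
 * or NULL if none of them is valid.
 */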
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation happens,
			 * some free pages could be in the MIGRATE_MOVABLE list
			 * although the pageblock's migration type is
			 * MIGRATE_ISOLATE. Catch it and move the page into
			 * the MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
						MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		} else if (page_count(page) == 0 &&
			   get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

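/*
 * Check that every pageblock in [start_pfn, end_pfn) is MIGRATE_ISOLATE and
 * that all pages in the range are free or marked as isolated.
 * Returns 0 if the whole range is isolated, -EBUSY otherwise.
 */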
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * So we check the pageblock's migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or Marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}