// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include "internal.h"

static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

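/*
 * devmap_managed_key gates the managed-page put path: it is only enabled
 * while at least one MEMORY_DEVICE_FS_DAX pagemap is registered (see
 * devmap_managed_enable_get()/put() below), so all other configurations
 * pay no cost on page put.
 */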
#ifdef CONFIG_FS_DAX
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_FS_DAX */

static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

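/*
 * First usable device pfn of a range. For the first range this skips any
 * pfns covered by the vmem_altmap reservation (see vmem_altmap_offset()),
 * which are not handed out as device pages.
 */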
static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

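/*
 * Return true if @pfn falls within one of @pgmap's ranges and is backed by
 * an initialized struct page, i.e. it is not part of the altmap reservation
 * at the start of the first range.
 */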
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}

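/* First pfn past the end of the given range */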
static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

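/*
 * Number of usable pages in the range, in units of compound pages when
 * pgmap->vmemmap_shift is set. This is the count of percpu references
 * taken in pagemap_range() and dropped again in memunmap_pages().
 */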
static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{
	return (pfn_end(pgmap, range_id) -
		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
}

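/*
 * Tear down a single range previously set up by pagemap_range(): remove the
 * pfn range from its zone, undo the arch mapping (or just the pages for
 * device private memory), and drop the pgmap_array entries.
 */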
static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(range->start, range_len(range),
				   pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
	pgmap_array_delete(range);
}

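/*
 * Kill the percpu reference, wait for all remaining page references to be
 * dropped, then unmap every range and release the resources taken by
 * memremap_pages().
 */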
void memunmap_pages(struct dev_pagemap *pgmap)
{
	int i;

	percpu_ref_kill(&pgmap->ref);
	for (i = 0; i < pgmap->nr_range; i++)
		percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
	wait_for_completion(&pgmap->done);
	percpu_ref_exit(&pgmap->ref);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

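/*
 * percpu_ref release callback: runs once the last reference is dropped
 * after percpu_ref_kill(), allowing memunmap_pages() to finish its wait
 * on pgmap->done.
 */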
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}

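/*
 * Set up one range of a dev_pagemap: reject overlaps with existing pagemaps
 * or System RAM, register the range in pgmap_array, hot-add the memory
 * (add_pages() for device private memory, arch_add_memory() otherwise),
 * move it into ZONE_DEVICE, initialize its memmap, and take one pgmap
 * reference per page.
 */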
static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	is_ram = region_intersects(range->start, range_len(range),
				   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
		error = -EINVAL;
		goto err_pfn_remap;
	}

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, thus we do not want
	 * to create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (is_private) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
	return 0;

err_add_memory:
	if (!is_private)
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}

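/*
 * Illustrative sketch (hypothetical names, not taken from this file): a
 * driver exposing one MEMORY_DEVICE_GENERIC range would typically fill in
 * a dev_pagemap before calling devm_memremap_pages()/memremap_pages():
 *
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	pgmap->nr_range = 1;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * MEMORY_DEVICE_PRIVATE users must additionally supply pgmap->ops
 * (migrate_to_ram and page_free) and pgmap->owner, as enforced below.
 */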
/*
 * Not device-managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct mhp_params params = {
		.altmap = pgmap_altmap(pgmap),
		.pgmap = pgmap,
		.pgprot = PAGE_KERNEL,
	};
	const int nr_range = pgmap->nr_range;
	int error, i;

	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
		return ERR_PTR(-EINVAL);

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_GENERIC:
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	init_completion(&pgmap->done);
	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
				GFP_KERNEL);
	if (error)
		return ERR_PTR(error);

	devmap_managed_enable_get(pgmap);

	/*
	 * Clear the pgmap nr_range as it will be incremented for each
	 * successfully processed range. This communicates how many
	 * regions to unwind in the abort case.
	 */
	pgmap->nr_range = 0;
	error = 0;
	for (i = 0; i < nr_range; i++) {
		error = pagemap_range(pgmap, &params, i, nid);
		if (error)
			break;
		pgmap->nr_range++;
	}

	if (i < nr_range) {
		memunmap_pages(pgmap);
		pgmap->nr_range = nr_range;
		return ERR_PTR(error);
	}

	return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range, nr_range, and type members of @pgmap must be
 *    initialized by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

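/* give back @nr_pfns that were previously allocated from @altmap */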
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up the pagemap for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(&pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

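/*
 * Called once a ZONE_DEVICE page is no longer referenced: clear state that
 * may be stale from a previous use (the anon-exclusive flag and
 * page->mapping), hand the page back to its driver via
 * pgmap->ops->page_free(), and reset the refcount to 1 so the page can be
 * handed out again.
 */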
void free_zone_device_page(struct page *page)
{
	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
		return;

	mem_cgroup_uncharge(page_folio(page));

	/*
	 * Note: we don't expect anonymous compound pages yet. Once they are
	 * supported and we can PTE-map them similarly to THP, we'd have to
	 * clear PG_anon_exclusive on all tail pages.
	 */
	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
	if (PageAnon(page))
		__ClearPageAnonExclusive(page);

	/*
	 * When a device managed page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 * migrate_vma_pages()
	 *   migrate_vma_insert_page()
	 *     page_add_new_anon_rmap()
	 *       __page_set_anon_rmap()
	 *         ...checks page->mapping, via PageAnon(page) call,
	 *           and incorrectly concludes that the page is an
	 *           anonymous page. Therefore, it incorrectly,
	 *           silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);

	/*
	 * Reset the page count to 1 to prepare for handing out the page again.
	 */
	set_page_count(page, 1);
}

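/*
 * fsdax pages keep a 1-based refcount (see the comment below). This helper
 * handles puts for MEMORY_DEVICE_FS_DAX pages, waking any waiter when the
 * refcount drops back to 1, and returns false for every other pagemap type
 * so the caller falls back to the normal put path.
 */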
#ifdef CONFIG_FS_DAX
bool __put_devmap_managed_page(struct page *page)
{
	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
		return false;

	/*
	 * fsdax page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (page_ref_dec_return(page) == 1)
		wake_up_var(&page->_refcount);
	return true;
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_FS_DAX */