// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include "internal.h"

static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
        return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
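
/*
 * Illustrative only (not part of mm/memremap.c): a minimal sketch of how a
 * hypothetical namespace driver might use memremap_compat_align() to reject
 * a layout that could not later be switched between memremap() and
 * memremap_pages() modes. The function name and parameters are invented for
 * illustration; only memremap_compat_align() is the real API.
 */
#if 0
static int example_validate_namespace(resource_size_t start, resource_size_t size)
{
        unsigned long align = memremap_compat_align();

        /* require both the start and the size to honor the arch minimum */
        if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
                return -EINVAL;
        return 0;
}
#endif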

#ifdef CONFIG_FS_DAX
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_FS_DAX)
                static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_FS_DAX)
                static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_FS_DAX */

static void pgmap_array_delete(struct range *range)
{
        xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
                        NULL, GFP_KERNEL);
        synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        unsigned long pfn = PHYS_PFN(range->start);

        if (range_id)
                return pfn;
        return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
        int i;

        for (i = 0; i < pgmap->nr_range; i++) {
                struct range *range = &pgmap->ranges[i];

                if (pfn >= PHYS_PFN(range->start) &&
                    pfn <= PHYS_PFN(range->end))
                        return pfn >= pfn_first(pgmap, i);
        }

        return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
        const struct range *range = &pgmap->ranges[range_id];

        return (range->start + range_len(range)) >> PAGE_SHIFT;
}

static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{
        return (pfn_end(pgmap, range_id) -
                pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        struct page *first_page;

        /* make sure to access a memmap that was actually initialized */
        first_page = pfn_to_page(pfn_first(pgmap, range_id));

        /* pages are dead and unused, undo the arch mapping */
        mem_hotplug_begin();
        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
                                   PHYS_PFN(range_len(range)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(range->start),
                               PHYS_PFN(range_len(range)), NULL);
        } else {
                arch_remove_memory(range->start, range_len(range),
                                   pgmap_altmap(pgmap));
                kasan_remove_zero_shadow(__va(range->start), range_len(range));
        }
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
        pgmap_array_delete(range);
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
        int i;

        percpu_ref_kill(&pgmap->ref);
        if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
            pgmap->type != MEMORY_DEVICE_COHERENT)
                for (i = 0; i < pgmap->nr_range; i++)
                        percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));

        wait_for_completion(&pgmap->done);

        for (i = 0; i < pgmap->nr_range; i++)
                pageunmap_range(pgmap, i);
        percpu_ref_exit(&pgmap->ref);

        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
        memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
        struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

        complete(&pgmap->done);
}

static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
                int range_id, int nid)
{
        const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
        struct range *range = &pgmap->ranges[range_id];
        struct dev_pagemap *conflict_pgmap;
        int error, is_ram;

        if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
                                "altmap not supported for multiple ranges\n"))
                return -EINVAL;

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        is_ram = region_intersects(range->start, range_len(range),
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram != REGION_DISJOINT) {
                WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
                                is_ram == REGION_MIXED ? "mixed" : "ram",
                                range->start, range->end);
                return -ENXIO;
        }

        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
                                PHYS_PFN(range->end), pgmap, GFP_KERNEL));
        if (error)
                return error;

        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
                        range_len(range));
        if (error)
                goto err_pfn_remap;

        if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
                error = -EINVAL;
                goto err_kasan;
        }

        mem_hotplug_begin();

        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory.
         * Moreover, the device memory is inaccessible, so we do not want to
         * create a linear mapping for the memory like arch_add_memory()
         * would do.
         *
         * For all other device memory types, which are accessible by
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
        if (is_private) {
                error = add_pages(nid, PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), params);
        } else {
                error = kasan_add_zero_shadow(__va(range->start), range_len(range));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }

                error = arch_add_memory(nid, range->start, range_len(range),
                                        params);
        }

        if (!error) {
                struct zone *zone;

                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), params->altmap,
                                MIGRATE_MOVABLE);
        }

        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), pgmap);
        if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
            pgmap->type != MEMORY_DEVICE_COHERENT)
                percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
        return 0;

err_add_memory:
        if (!is_private)
                kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
err_pfn_remap:
        pgmap_array_delete(range);
        return error;
}


/*
 * Not device managed version of devm_memremap_pages, undone by
 * memunmap_pages().  Please use devm_memremap_pages if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
        struct mhp_params params = {
                .altmap = pgmap_altmap(pgmap),
                .pgmap = pgmap,
                .pgprot = PAGE_KERNEL,
        };
        const int nr_range = pgmap->nr_range;
        int error, i;

        if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
                return ERR_PTR(-EINVAL);

        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
                if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                        WARN(1, "Device private memory not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops->page_free) {
                        WARN(1, "Missing page_free method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_COHERENT:
                if (!pgmap->ops->page_free) {
                        WARN(1, "Missing page_free method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_FS_DAX:
                if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                params.pgprot = pgprot_decrypted(params.pgprot);
                break;
        case MEMORY_DEVICE_GENERIC:
                break;
        case MEMORY_DEVICE_PCI_P2PDMA:
                params.pgprot = pgprot_noncached(params.pgprot);
                break;
        default:
                WARN(1, "Invalid pgmap type %d\n", pgmap->type);
                break;
        }

        init_completion(&pgmap->done);
        error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
                                GFP_KERNEL);
        if (error)
                return ERR_PTR(error);

        devmap_managed_enable_get(pgmap);

        /*
         * Clear the pgmap nr_range as it will be incremented for each
         * successfully processed range. This communicates how many
         * regions to unwind in the abort case.
         */
        pgmap->nr_range = 0;
        error = 0;
        for (i = 0; i < nr_range; i++) {
                error = pagemap_range(pgmap, &params, i, nid);
                if (error)
                        break;
                pgmap->nr_range++;
        }

        if (i < nr_range) {
                memunmap_pages(pgmap);
                pgmap->nr_range = nr_range;
                return ERR_PTR(error);
        }

        return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref must
 *    be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        int error;
        void *ret;

        ret = memremap_pages(pgmap, dev_to_node(dev));
        if (IS_ERR(ret))
                return ret;

        error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
                        pgmap);
        if (error)
                return ERR_PTR(error);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
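
/*
 * Illustrative only (not part of mm/memremap.c): a minimal sketch of the
 * caller side described in the notes above, assuming a hypothetical driver
 * that owns a physical range. The probe function and its parameters are
 * invented for illustration; only struct dev_pagemap and
 * devm_memremap_pages() are the real API.
 */
#if 0
static int example_probe(struct device *dev, resource_size_t start,
                         resource_size_t size)
{
        struct dev_pagemap *pgmap;
        void *addr;

        pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
                return -ENOMEM;

        /* 1/ at a minimum, range and type must be filled in by the caller */
        pgmap->range.start = start;
        pgmap->range.end = start + size - 1;
        pgmap->nr_range = 1;
        pgmap->type = MEMORY_DEVICE_GENERIC;

        /* struct pages are created and torn down with the device's lifetime */
        addr = devm_memremap_pages(dev, pgmap);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        /* addr is the kernel direct-map address of range.start */
        return 0;
}
#endif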

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        if (altmap)
                return altmap->reserve + altmap->free;
        return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
                if (phys >= pgmap->range.start && phys <= pgmap->range.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
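
/*
 * Illustrative only (not part of mm/memremap.c): a hedged sketch of the
 * caching pattern the kerneldoc above describes. A caller that walks many
 * pfns can pass the previously returned pgmap back in, so the xarray lookup
 * only happens when a pfn crosses into a different dev_pagemap. The function
 * name and loop body are invented for illustration.
 */
#if 0
static void example_walk(unsigned long start_pfn, unsigned long nr_pages)
{
        struct dev_pagemap *pgmap = NULL;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                /* reuses the cached reference while it still covers @pfn */
                pgmap = get_dev_pagemap(pfn, pgmap);
                if (!pgmap)
                        break;
                /* ... operate on pfn_to_page(pfn) ... */
        }
        if (pgmap)
                put_dev_pagemap(pgmap);
}
#endif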

void free_zone_device_page(struct page *page)
{
        if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
                return;

        mem_cgroup_uncharge(page_folio(page));

        /*
         * Note: we don't expect anonymous compound pages yet. Once they are
         * supported and we can PTE-map them similarly to THP, we'd have to
         * clear PG_anon_exclusive on all tail pages.
         */
        VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
        if (PageAnon(page))
                __ClearPageAnonExclusive(page);

        /*
         * When a device managed page is freed, the page->mapping field
         * may still contain a (stale) mapping value. For example, the
         * lower bits of page->mapping may still identify the page as an
         * anonymous page. Ultimately, this entire field is just stale
         * and wrong, and it will cause errors if not cleared. One
         * example is:
         *
         *  migrate_vma_pages()
         *    migrate_vma_insert_page()
         *      page_add_new_anon_rmap()
         *        __page_set_anon_rmap()
         *          ...checks page->mapping, via PageAnon(page) call,
         *            and incorrectly concludes that the page is an
         *            anonymous page. Therefore, it incorrectly,
         *            silently fails to set up the new anon rmap.
         *
         * For other types of ZONE_DEVICE pages, migration is either
         * handled differently or not done at all, so there is no need
         * to clear page->mapping.
         */
        page->mapping = NULL;
        page->pgmap->ops->page_free(page);

        if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
            page->pgmap->type != MEMORY_DEVICE_COHERENT)
                /*
                 * Reset the page count to 1 to prepare for handing out the
                 * page again.
                 */
                set_page_count(page, 1);
        else
                put_dev_pagemap(page->pgmap);
}

void zone_device_page_init(struct page *page)
{
        /*
         * Drivers shouldn't be allocating pages after calling
         * memunmap_pages().
         */
        WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
        set_page_count(page, 1);
        lock_page(page);
}
EXPORT_SYMBOL_GPL(zone_device_page_init);
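
/*
 * Illustrative only (not part of mm/memremap.c): a hedged sketch of how a
 * device driver might hand out one of its ZONE_DEVICE pages, loosely in the
 * style of in-tree users such as lib/test_hmm.c. The example_device struct
 * and its free list (chained through page->zone_device_data) are invented
 * for illustration; only zone_device_page_init() is the real API.
 */
#if 0
struct example_device {
        spinlock_t lock;
        struct page *free_pages;        /* chained via page->zone_device_data */
};

static struct page *example_alloc_device_page(struct example_device *edev)
{
        struct page *page;

        spin_lock(&edev->lock);
        page = edev->free_pages;
        if (page)
                edev->free_pages = page->zone_device_data;
        spin_unlock(&edev->lock);
        if (!page)
                return NULL;

        /* takes a pgmap reference, sets the refcount to 1 and locks the page */
        zone_device_page_init(page);
        return page;
}
#endif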

#ifdef CONFIG_FS_DAX
bool __put_devmap_managed_page_refs(struct page *page, int refs)
{
        if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
                return false;

        /*
         * fsdax page refcounts are 1-based, rather than 0-based: if
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
        if (page_ref_sub_return(page, refs) == 1)
                wake_up_var(&page->_refcount);
        return true;
}
EXPORT_SYMBOL(__put_devmap_managed_page_refs);
#endif /* CONFIG_FS_DAX */