/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

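/*
 * Tracks which physical pfn ranges are claimed by a dev_pagemap; used by
 * get_dev_pagemap() to resolve a pfn back to its hosting pgmap.
 */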
static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch-supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

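/*
 * devmap_managed_key gates the extra release handling that device-private
 * and fs-dax pages need when their last user reference is dropped. It is
 * enabled per pgmap in devmap_managed_enable_get() and dropped again in
 * devmap_managed_enable_put().
 */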
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

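/*
 * First pfn of a range that is backed by a usable struct page. For the
 * first range this skips any pfns that a vmem_altmap sets aside at the
 * start of the range to hold the memmap itself.
 */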
static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

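/*
 * Returns true if @pfn falls within one of the pgmap's ranges and is not
 * part of the altmap reservation at the start of the first range.
 */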
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

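/*
 * Walking every pfn of a large device range can take a while, so yield to
 * the scheduler every 1024 pfns to avoid soft lockups.
 */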
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map, i) \
	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))

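/*
 * Reference teardown happens in two steps: dev_pagemap_kill() stops new
 * references from being taken, and dev_pagemap_cleanup() waits for all
 * outstanding references to be dropped.
 */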
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;
	int nid;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	nid = page_to_nid(first_page);

	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(nid, range->start, range_len(range),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
	pgmap_array_delete(range);
}

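/*
 * Tear down a pgmap set up by memremap_pages(): stop new references, drop
 * the per-pfn references taken at map time, wait for remaining users, then
 * unplug every range.
 */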
void memunmap_pages(struct dev_pagemap *pgmap)
{
	unsigned long pfn;
	int i;

	dev_pagemap_kill(pgmap);
	for (i = 0; i < pgmap->nr_range; i++)
		for_each_device_pfn(pfn, pgmap, i)
			put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

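/*
 * Hotplug a single range: check it against existing pgmaps and System RAM,
 * register it in pgmap_array, create the page tables and struct pages
 * (add_pages() for device-private memory, arch_add_memory() otherwise), and
 * take a pgmap reference for every pfn that was mapped.
 */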
static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	is_ram = region_intersects(range->start, range_len(range),
				   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
		error = -EINVAL;
		goto err_pfn_remap;
	}

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (is_private) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
			- pfn_first(pgmap, range_id));
	return 0;

err_add_memory:
	kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
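/*
 * Illustrative sketch of a caller (not taken from this file; the resource,
 * ops structure, and owner handle below are hypothetical): a driver exposing
 * device-private memory might fill in its pgmap roughly like this, assuming
 * my_pgmap_ops supplies the required page_free() and migrate_to_ram()
 * callbacks:
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	pgmap->ops = &my_pgmap_ops;
 *	pgmap->owner = my_owner_handle;
 *	addr = memremap_pages(pgmap, numa_node_id());
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */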
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct mhp_params params = {
		.altmap = pgmap_altmap(pgmap),
		.pgprot = PAGE_KERNEL,
	};
	const int nr_range = pgmap->nr_range;
	int error, i;

	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
		return ERR_PTR(-EINVAL);

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_GENERIC:
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	devmap_managed_enable_get(pgmap);

	/*
	 * Clear the pgmap nr_range as it will be incremented for each
	 * successfully processed range. This communicates how many
	 * regions to unwind in the abort case.
	 */
	pgmap->nr_range = 0;
	error = 0;
	for (i = 0; i < nr_range; i++) {
		error = pagemap_range(pgmap, &params, i, nid);
		if (error)
			break;
		pgmap->nr_range++;
	}

	if (i < nr_range) {
		memunmap_pages(pgmap);
		pgmap->nr_range = nr_range;
		return ERR_PTR(error);
	}

	return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref must
 *    be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

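/*
 * A vmem_altmap describes pfns at the start of a device range that are set
 * aside to hold the range's own memmap: 'reserve' pfns are never touched,
 * 'free' pfns may be handed out for memmap storage, and 'alloc' counts how
 * many of those have been consumed.
 */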
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a dev_pagemap for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
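/*
 * Illustrative lookup pattern (not taken from a specific caller): feeding the
 * previously returned pgmap back in lets repeated lookups within the same
 * mapping skip the xarray walk:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (each pfn of interest) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;
 *		...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */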
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
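/*
 * Called from the page release path once a devmap-managed page loses its
 * last user reference: fs/dax pages only need their page-idle waiters woken,
 * while device-private pages are handed back to the driver via page_free().
 */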
void free_devmap_managed_page(struct page *page)
{
	/* notify page idle for dax */
	if (!is_device_private_page(page)) {
		wake_up_var(&page->_refcount);
		return;
	}

	__ClearPageWaiters(page);

	mem_cgroup_uncharge(page);

	/*
	 * When a device_private page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 *  migrate_vma_pages()
	 *    migrate_vma_insert_page()
	 *      page_add_new_anon_rmap()
	 *        __page_set_anon_rmap()
	 *          ...checks page->mapping, via PageAnon(page) call,
	 *            and incorrectly concludes that the page is an
	 *            anonymous page. Therefore, it incorrectly,
	 *            silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */